dpt_i2o.c
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <[email protected]>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};


/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);

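/*
 * How the pieces above fit together: adpt_i2o_post_wait() links an
 * adpt_i2o_post_wait_data entry into adpt_post_wait_queue, stamps its
 * 15-bit id into the message's transaction context, and sleeps on the
 * wait queue; the reply path calls adpt_i2o_post_wait_complete(),
 * which finds the entry by id, records the status, and wakes the
 * sleeper.  adpt_post_wait_lock protects both the list and the id
 * counter.
 */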

/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
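/*
 * dma_high()/dma_low() split a dma_addr_t into the two 32-bit words
 * that a 64-bit I2O SG element carries (low word first, high word
 * second - see the SG list built in adpt_inquiry()).  On 32-bit
 * builds dpt_dma64() is constant false and dma_high() is never used.
 */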

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP fails to come online, we need to rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
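/*
 * Detection walks each IOP through the I2O state machine: activate
 * (INIT -> HOLD: status get, outbound init, HRT fetch), push a system
 * table, online (HOLD -> OPERATIONAL), then fetch and parse the LCT.
 * An IOP that fails any step is deleted from hba_chain, and a failed
 * online forces the system table to be rebuilt for the survivors.
 */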


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
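/*
 * Frame layout used above, assuming the standard I2O simple-SGE flag
 * encoding: words 0-3 are the I2O header, 4-6 the DPT private SCSI
 * exec header, 7-10 the 16-byte CDB block, word 11 the byte count,
 * and from word 12 (SGL_OFFSET_12) the SG element - 0xD0000000 marks
 * a simple element that is both last in the list and end of buffer,
 * OR'd with the direction bits and transfer length.
 */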


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

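/*
 * DEF_SCSI_QCMD() generates the adpt_queue() wrapper the midlayer
 * actually calls: it takes the host lock and then invokes
 * adpt_queue_lck() above, which is why the _lck variant may assume
 * the lock is already held.
 */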
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than or equal to 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
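/*
 * The checkpoint blocks above implement the old proc_info contract:
 * the caller wants `length` bytes starting at `offset` of the full
 * report, so output is generated incrementally, `begin` is advanced
 * while we are still short of the window, and generation stops via
 * stop_output once we have written past offset + length.
 */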

/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
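/*
 * The reverse lookup has to walk every device's command list because
 * adpt_cmd_to_context() truncates the serial number to 32 bits.  The
 * host lock is dropped around the scan since shost_for_each_device()
 * acquires it internally.
 */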

/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
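/*
 * On 32-bit kernels a kernel pointer fits in the 32-bit I2O context,
 * so it is passed through directly.  On 64-bit kernels the context is
 * an index into the fixed ioctl_reply_context[] table instead: a slot
 * is claimed in adpt_ioctl_to_context() and released by the lookup
 * above, bounding the number of outstanding ioctl commands.
 */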

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: channel %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
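/*
 * adpt_hba_reset() re-runs the same activate / system-table / online /
 * LCT sequence used at detect time, with DPTI_STATE_RESET set across
 * it so adpt_queue_lck() holds off new commands; adpt_fail_posted_scbs()
 * then errors back whatever was in flight before the reset.
 */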

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
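/*
 * Note the two address spaces set up above: base_addr_virt always maps
 * BAR0, which holds the FIFO registers (irq mask at +0x30, post port
 * at +0x40, reply port at +0x44), while msg_addr_virt is where message
 * frames are copied - a separate BAR1 mapping on split-BAR "Raptor"
 * boards, an alias of BAR0 otherwise.
 */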


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before devices are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
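/*
 * Error conventions above: -ETIMEDOUT comes from adpt_i2o_post_this()
 * failing to obtain a message frame, in which case wait_data stays
 * queued and is only reclaimed in adpt_i2o_sys_shutdown(); -ETIME
 * means the message was posted but no reply arrived in time, so the
 * buffers it references must not be freed.
 */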


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
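/*
 * The inbound FIFO handshake: reading post_port pops the offset of a
 * free message frame (EMPTY_QUEUE if none is available), the request
 * is copied into that frame through msg_addr_virt, and writing the
 * same offset back to post_port hands the frame to the IOP.
 */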


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding. If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
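/*
 * Parsing makes three passes: hidden LCT entries (user_tid != 0xfff)
 * only raise the top channel/id/lun watermarks so disassembled arrays
 * stay addressable; bus-adapter and FC ports are then assigned channel
 * slots in LCT order; finally each storage-class device is queried
 * (group 0x8000) for its bus/id/lun and linked into the per-channel
 * device table.
 */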


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
1794 
1795 
1796 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1797 {
1798  u32 msg[MAX_MESSAGE_SIZE];
1799  u32* reply = NULL;
1800  u32 size = 0;
1801  u32 reply_size = 0;
1802  u32 __user *user_msg = arg;
1803  u32 __user * user_reply = NULL;
1804  void *sg_list[pHba->sg_tablesize];
1805  u32 sg_offset = 0;
1806  u32 sg_count = 0;
1807  int sg_index = 0;
1808  u32 i = 0;
1809  u32 rcode = 0;
1810  void *p = NULL;
1811  dma_addr_t addr;
1812  ulong flags = 0;
1813 
1814  memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1815  // get user msg size in u32s
1816  if(get_user(size, &user_msg[0])){
1817  return -EFAULT;
1818  }
1819  size = size>>16;
1820 
1821  user_reply = &user_msg[size];
1822  if(size > MAX_MESSAGE_SIZE){
1823  return -EFAULT;
1824  }
1825  size *= 4; // Convert to bytes
1826 
1827  /* Copy in the user's I2O command */
1828  if(copy_from_user(msg, user_msg, size)) {
1829  return -EFAULT;
1830  }
1831  get_user(reply_size, &user_reply[0]);
1832  reply_size = reply_size>>16;
1833  if(reply_size > REPLY_FRAME_SIZE){
1834  reply_size = REPLY_FRAME_SIZE;
1835  }
1836  reply_size *= 4;
1837  reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1838  if(reply == NULL) {
1839  printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1840  return -ENOMEM;
1841  }
1842  sg_offset = (msg[0]>>4)&0xf;
1843  msg[2] = 0x40000000; // IOCTL context
1844  msg[3] = adpt_ioctl_to_context(pHba, reply);
1845  if (msg[3] == (u32)-1)
1846  return -EBUSY;
1847 
1848  memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1849  if(sg_offset) {
1850  // TODO add 64 bit API
1851  struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1852  sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1853  if (sg_count > pHba->sg_tablesize){
1854  printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1855  kfree (reply);
1856  return -EINVAL;
1857  }
1858 
1859  for(i = 0; i < sg_count; i++) {
1860  int sg_size;
1861 
1862  if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1863  printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1864  rcode = -EINVAL;
1865  goto cleanup;
1866  }
1867  sg_size = sg[i].flag_count & 0xffffff;
1868  /* Allocate memory for the transfer */
1869  p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1870  if(!p) {
1871  printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1872  pHba->name,sg_size,i,sg_count);
1873  rcode = -ENOMEM;
1874  goto cleanup;
1875  }
1876  sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1877  /* Copy in the user's SG buffer if necessary */
1878  if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1879  // sg_simple_element API is 32 bit
1880  if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1881  printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1882  rcode = -EFAULT;
1883  goto cleanup;
1884  }
1885  }
1886  /* sg_simple_element API is 32 bit, but addr < 4GB */
1887  sg[i].addr_bus = addr;
1888  }
1889  }
1890 
1891  do {
1892  if(pHba->host)
1893  spin_lock_irqsave(pHba->host->host_lock, flags);
1894  // This state stops any new commands from enterring the
1895  // controller while processing the ioctl
1896 // pHba->state |= DPTI_STATE_IOCTL;
1897 // We can't set this now - The scsi subsystem sets host_blocked and
1898 // the queue empties and stops. We need a way to restart the queue
1899  rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1900  if (rcode != 0)
1901  printk("adpt_i2o_passthru: post wait failed %d %p\n",
1902  rcode, reply);
1903 // pHba->state &= ~DPTI_STATE_IOCTL;
1904  if(pHba->host)
1905  spin_unlock_irqrestore(pHba->host->host_lock, flags);
1906  } while(rcode == -ETIMEDOUT);
1907 
1908  if(rcode){
1909  goto cleanup;
1910  }
1911 
1912  if(sg_offset) {
1913  /* Copy back the Scatter Gather buffers back to user space */
1914  u32 j;
1915  // TODO add 64 bit API
1916  struct sg_simple_element* sg;
1917  int sg_size;
1918 
1919  // re-acquire the original message to handle correctly the sg copy operation
1920  memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1921  // get user msg size in u32s
1922  if(get_user(size, &user_msg[0])){
1923  rcode = -EFAULT;
1924  goto cleanup;
1925  }
1926  size = size>>16;
1927  size *= 4;
1928  if (size > MAX_MESSAGE_SIZE) {
1929  rcode = -EINVAL;
1930  goto cleanup;
1931  }
1932  /* Copy in the user's I2O command */
1933  if (copy_from_user (msg, user_msg, size)) {
1934  rcode = -EFAULT;
1935  goto cleanup;
1936  }
1937  sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1938 
1939  // TODO add 64 bit API
1940  sg = (struct sg_simple_element*)(msg + sg_offset);
1941  for (j = 0; j < sg_count; j++) {
1942  /* Copy out the SG list to user's buffer if necessary */
1943  if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1944  sg_size = sg[j].flag_count & 0xffffff;
1945  // sg_simple_element API is 32 bit
1946  if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1947  printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1948  rcode = -EFAULT;
1949  goto cleanup;
1950  }
1951  }
1952  }
1953  }
1954 
1955  /* Copy back the reply to user space */
1956  if (reply_size) {
1957  // we wrote our own values for context - now restore the user supplied ones
1958  if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1959  printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1960  rcode = -EFAULT;
1961  }
1962  if(copy_to_user(user_reply, reply, reply_size)) {
1963  printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1964  rcode = -EFAULT;
1965  }
1966  }
1967 
1968 
1969 cleanup:
1970  if (rcode != -ETIME && rcode != -EINTR) {
1971  struct sg_simple_element *sg =
1972  (struct sg_simple_element*) (msg +sg_offset);
1973  kfree (reply);
1974  while(sg_index) {
1975  if(sg_list[--sg_index]) {
1976  dma_free_coherent(&pHba->pDev->dev,
1977  sg[sg_index].flag_count & 0xffffff,
1978  sg_list[sg_index],
1979  sg[sg_index].addr_bus);
1980  }
1981  }
1982  }
1983  return rcode;
1984 }
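/*
 * Cleanup rule used above: on -ETIME/-EINTR the controller may still
 * DMA into the bounce buffers and the reply frame, so they are
 * deliberately leaked rather than handed back while possibly in
 * flight; in every other case the reply and each dma_alloc_coherent()
 * buffer recorded in sg_list[] are released here.
 */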
1985 
1986 #if defined __ia64__
1987 static void adpt_ia64_info(sysInfo_S* si)
1988 {
1989  // This is all the info we need for now
1990  // We will add more info as our new
1991  // management utility requires it
1992  si->processorType = PROC_IA64;
1993 }
1994 #endif
1995 
1996 #if defined __sparc__
1997 static void adpt_sparc_info(sysInfo_S* si)
1998 {
1999  // This is all the info we need for now
2000  // We will add more info as our new
2001  // management utility requires it
2002  si->processorType = PROC_ULTRASPARC;
2003  }
2004 #endif
2005 #if defined __alpha__
2006 static void adpt_alpha_info(sysInfo_S* si)
2007 {
2008  // This is all the info we need for now
2009  // We will add more info as our new
2010  // management utility requires it
2011  si->processorType = PROC_ALPHA;
2012 }
2013 #endif
2014 
2015 #if defined __i386__
2016 static void adpt_i386_info(sysInfo_S* si)
2017 {
2018  // This is all the info we need for now
2019  // We will add more info as our new
2020  // management utility requires it
2021  switch (boot_cpu_data.x86) {
2022  case CPU_386:
2023  si->processorType = PROC_386;
2024  break;
2025  case CPU_486:
2026  si->processorType = PROC_486;
2027  break;
2028  case CPU_586:
2029  si->processorType = PROC_PENTIUM;
2030  break;
2031  default: // Just in case
2032  si->processorType = PROC_PENTIUM;
2033  break;
2034  }
2035 }
2036 #endif
2037 
2038 /*
2039  * This routine returns information about the system. It does not affect
2040  * any driver logic, so if the info is wrong it doesn't matter.
2041  */
2042 
2043 /* Get all the info we can not get from kernel services */
2044 static int adpt_system_info(void __user *buffer)
2045 {
2046  sysInfo_S si;
2047 
2048  memset(&si, 0, sizeof(si));
2049 
2050  si.osType = OS_LINUX;
2051  si.osMajorVersion = 0;
2052  si.osMinorVersion = 0;
2053  si.osRevision = 0;
2054  si.busType = SI_PCI_BUS;
2055  si.processorFamily = DPTI_sig.dsProcessorFamily;
2056 
2057 #if defined __i386__
2058  adpt_i386_info(&si);
2059 #elif defined (__ia64__)
2060  adpt_ia64_info(&si);
2061 #elif defined(__sparc__)
2062  adpt_sparc_info(&si);
2063 #elif defined (__alpha__)
2064  adpt_alpha_info(&si);
2065 #else
2066  si.processorType = 0xff ;
2067 #endif
2068  if (copy_to_user(buffer, &si, sizeof(si))){
2069  printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2070  return -EFAULT;
2071  }
2072 
2073  return 0;
2074 }
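/*
 * A minimal sketch of the matching user-space call (the device node
 * name here is illustrative, not taken from this file):
 *
 *   int fd = open("/dev/dpti0", O_RDONLY);
 *   sysInfo_S si;
 *   if (fd >= 0 && ioctl(fd, DPT_SYSINFO, &si) == 0)
 *           printf("osType=%d busType=%d\n", si.osType, si.busType);
 */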
2075 
2076 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
2077 {
2078  int minor;
2079  int error = 0;
2080  adpt_hba* pHba;
2081  ulong flags = 0;
2082  void __user *argp = (void __user *)arg;
2083 
2084  minor = iminor(inode);
2085  if (minor >= DPTI_MAX_HBA){
2086  return -ENXIO;
2087  }
2088  mutex_lock(&adpt_configuration_lock);
2089  for (pHba = hba_chain; pHba; pHba = pHba->next) {
2090  if (pHba->unit == minor) {
2091  break; /* found adapter */
2092  }
2093  }
2094  mutex_unlock(&adpt_configuration_lock);
2095  if(pHba == NULL){
2096  return -ENXIO;
2097  }
2098 
2099  while((volatile u32) pHba->state & DPTI_STATE_RESET )
2100  schedule_timeout_uninterruptible(2);
2101 
2102  switch (cmd) {
2103  // TODO: handle 3 cases
2104  case DPT_SIGNATURE:
2105  if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2106  return -EFAULT;
2107  }
2108  break;
2109  case I2OUSRCMD:
2110  return adpt_i2o_passthru(pHba, argp);
2111 
2112  case DPT_CTRLINFO:{
2113  drvrHBAinfo_S HbaInfo;
2114 
2115 #define FLG_OSD_PCI_VALID 0x0001
2116 #define FLG_OSD_DMA 0x0002
2117 #define FLG_OSD_I2O 0x0004
2118  memset(&HbaInfo, 0, sizeof(HbaInfo));
2119  HbaInfo.drvrHBAnum = pHba->unit;
2120  HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2121  HbaInfo.blinkState = adpt_read_blink_led(pHba);
2122  HbaInfo.pciBusNum = pHba->pDev->bus->number;
2123  HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2124  HbaInfo.Interrupt = pHba->pDev->irq;
2125  HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2126  if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2127  printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2128  return -EFAULT;
2129  }
2130  break;
2131  }
2132  case DPT_SYSINFO:
2133  return adpt_system_info(argp);
2134  case DPT_BLINKLED:{
2135  u32 value;
2136  value = (u32)adpt_read_blink_led(pHba);
2137  if (copy_to_user(argp, &value, sizeof(value))) {
2138  return -EFAULT;
2139  }
2140  break;
2141  }
2142  case I2ORESETCMD:
2143  if(pHba->host)
2144  spin_lock_irqsave(pHba->host->host_lock, flags);
2145  adpt_hba_reset(pHba);
2146  if(pHba->host)
2147  spin_unlock_irqrestore(pHba->host->host_lock, flags);
2148  break;
2149  case I2ORESCANCMD:
2150  adpt_rescan(pHba);
2151  break;
2152  default:
2153  return -EINVAL;
2154  }
2155 
2156  return error;
2157 }
2158 
2159 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2160 {
2161  struct inode *inode;
2162  long ret;
2163 
2164  inode = file->f_dentry->d_inode;
2165 
2166  mutex_lock(&adpt_mutex);
2167  ret = adpt_ioctl(inode, file, cmd, arg);
2168  mutex_unlock(&adpt_mutex);
2169 
2170  return ret;
2171 }
2172 
2173 #ifdef CONFIG_COMPAT
2174 static long compat_adpt_ioctl(struct file *file,
2175  unsigned int cmd, unsigned long arg)
2176 {
2177  struct inode *inode;
2178  long ret;
2179 
2180  inode = file->f_dentry->d_inode;
2181 
2182  mutex_lock(&adpt_mutex);
2183 
2184  switch(cmd) {
2185  case DPT_SIGNATURE:
2186  case I2OUSRCMD:
2187  case DPT_CTRLINFO:
2188  case DPT_SYSINFO:
2189  case DPT_BLINKLED:
2190  case I2ORESETCMD:
2191  case I2ORESCANCMD:
2192  case (DPT_TARGET_BUSY & 0xFFFF):
2193  case DPT_TARGET_BUSY:
2194  ret = adpt_ioctl(inode, file, cmd, arg);
2195  break;
2196  default:
2197  ret = -ENOIOCTLCMD;
2198  }
2199 
2200  mutex_unlock(&adpt_mutex);
2201 
2202  return ret;
2203 }
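/*
 * Only the known DPT/I2O commands are forwarded here; anything else
 * returns -ENOIOCTLCMD so the compat core reports it as unsupported
 * rather than guessing at the argument layout.
 */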
2204 #endif
2205 
2206 static irqreturn_t adpt_isr(int irq, void *dev_id)
2207 {
2208  struct scsi_cmnd* cmd;
2209  adpt_hba* pHba = dev_id;
2210  u32 m;
2211  void __iomem *reply;
2212  u32 status=0;
2213  u32 context;
2214  ulong flags = 0;
2215  int handled = 0;
2216 
2217  if (pHba == NULL){
2218  printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2219  return IRQ_NONE;
2220  }
2221  if(pHba->host)
2222  spin_lock_irqsave(pHba->host->host_lock, flags);
2223 
2224  while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2225  m = readl(pHba->reply_port);
2226  if(m == EMPTY_QUEUE){
2227  // Try twice then give up
2228  rmb();
2229  m = readl(pHba->reply_port);
2230  if(m == EMPTY_QUEUE){
2231  // This really should not happen
2232  printk(KERN_ERR"dpti: Could not get reply frame\n");
2233  goto out;
2234  }
2235  }
2236  if (pHba->reply_pool_pa <= m &&
2237  m < pHba->reply_pool_pa +
2238  (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2239  reply = (u8 *)pHba->reply_pool +
2240  (m - pHba->reply_pool_pa);
2241  } else {
2242  /* Ick, we should *never* be here */
2243  printk(KERN_ERR "dpti: reply frame not from pool\n");
2244  reply = (u8 *)bus_to_virt(m);
2245  }
2246 
2247  if (readl(reply) & MSG_FAIL) {
2248  u32 old_m = readl(reply+28);
2249  void __iomem *msg;
2250  u32 old_context;
2251  PDEBUG("%s: Failed message\n",pHba->name);
2252  if(old_m >= 0x100000){
2253  printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2254  writel(m,pHba->reply_port);
2255  continue;
2256  }
2257  // Transaction context is 0 in failed reply frame
2258  msg = pHba->msg_addr_virt + old_m;
2259  old_context = readl(msg+12);
2260  writel(old_context, reply+12);
2261  adpt_send_nop(pHba, old_m);
2262  }
2263  context = readl(reply+8);
2264  if(context & 0x40000000){ // IOCTL
2265  void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2266  if( p != NULL) {
2267  memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2268  }
2269  // All IOCTLs will also be post wait
2270  }
2271  if(context & 0x80000000){ // Post wait message
2272  status = readl(reply+16);
2273  if(status >> 24){
2274  status &= 0xffff; /* Get detail status */
2275  } else {
2276  status = I2O_POST_WAIT_OK;
2277  }
2278  if(!(context & 0x40000000)) {
2279  cmd = adpt_cmd_from_context(pHba,
2280  readl(reply+12));
2281  if(cmd != NULL) {
2282  printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2283  }
2284  }
2285  adpt_i2o_post_wait_complete(context, status);
2286  } else { // SCSI message
2287  cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2288  if(cmd != NULL){
2289  scsi_dma_unmap(cmd);
2290  if(cmd->serial_number != 0) { // If not timedout
2291  adpt_i2o_to_scsi(reply, cmd);
2292  }
2293  }
2294  }
2295  writel(m, pHba->reply_port);
2296  wmb();
2297  rmb();
2298  }
2299  handled = 1;
2300 out: if(pHba->host)
2301  spin_unlock_irqrestore(pHba->host->host_lock, flags);
2302  return IRQ_RETVAL(handled);
2303 }
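/*
 * Reply routing above keys off two bits of the transaction context:
 * 0x40000000 marks an ioctl passthru reply (copied back for
 * adpt_i2o_passthru) and 0x80000000 marks a post-wait completion;
 * anything else is a normal SCSI command reply. Writing m back to
 * reply_port returns the frame to the controller's free FIFO.
 */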
2304 
2305 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2306 {
2307  int i;
2308  u32 msg[MAX_MESSAGE_SIZE];
2309  u32* mptr;
2310  u32* lptr;
2311  u32 *lenptr;
2312  int direction;
2313  int scsidir;
2314  int nseg;
2315  u32 len;
2316  u32 reqlen;
2317  s32 rcode;
2318  dma_addr_t addr;
2319 
2320  memset(msg, 0 , sizeof(msg));
2321  len = scsi_bufflen(cmd);
2322  direction = 0x00000000;
2323 
2324  scsidir = 0x00000000; // DATA NO XFER
2325  if(len) {
2326  /*
2327  * Set SCBFlags to indicate if data is being transferred
2328  * in or out, or no data transfer
2329  * Note: Do not have to verify index is less than 0 since
2330  * cmd->cmnd[0] is an unsigned char
2331  */
2332  switch(cmd->sc_data_direction){
2333  case DMA_FROM_DEVICE:
2334  scsidir =0x40000000; // DATA IN (iop<--dev)
2335  break;
2336  case DMA_TO_DEVICE:
2337  direction=0x04000000; // SGL OUT
2338  scsidir =0x80000000; // DATA OUT (iop-->dev)
2339  break;
2340  case DMA_NONE:
2341  break;
2342  case DMA_BIDIRECTIONAL:
2343  scsidir =0x40000000; // DATA IN (iop<--dev)
2344  // Assume In - and continue;
2345  break;
2346  default:
2347  printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2348  pHba->name, cmd->cmnd[0]);
2349  cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2350  cmd->scsi_done(cmd);
2351  return 0;
2352  }
2353  }
2354  // msg[0] is set later
2355  // I2O_CMD_SCSI_EXEC
2356  msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2357  msg[2] = 0;
2358  msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2359  // Our cards use the transaction context as the tag for queueing
2360  // Adaptec/DPT Private stuff
2361  msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2362  msg[5] = d->tid;
2363  /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2364  // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2365  // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2366  // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2367  msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2368 
2369  mptr=msg+7;
2370 
2371  // Write SCSI command into the message - always 16 byte block
2372  memset(mptr, 0, 16);
2373  memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2374  mptr+=4;
2375  lenptr=mptr++; /* Remember me - fill in when we know */
2376  if (dpt_dma64(pHba)) {
2377  reqlen = 16; // SINGLE SGE
2378  *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2379  *mptr++ = 1 << PAGE_SHIFT;
2380  } else {
2381  reqlen = 14; // SINGLE SGE
2382  }
2383  /* Now fill in the SGList and command */
2384 
2385  nseg = scsi_dma_map(cmd);
2386  BUG_ON(nseg < 0);
2387  if (nseg) {
2388  struct scatterlist *sg;
2389 
2390  len = 0;
2391  scsi_for_each_sg(cmd, sg, nseg, i) {
2392  lptr = mptr;
2393  *mptr++ = direction|0x10000000|sg_dma_len(sg);
2394  len+=sg_dma_len(sg);
2395  addr = sg_dma_address(sg);
2396  *mptr++ = dma_low(addr);
2397  if (dpt_dma64(pHba))
2398  *mptr++ = dma_high(addr);
2399  /* Make this an end of list */
2400  if (i == nseg - 1)
2401  *lptr = direction|0xD0000000|sg_dma_len(sg);
2402  }
2403  reqlen = mptr - msg;
2404  *lenptr = len;
2405 
2406  if(cmd->underflow && len != cmd->underflow){
2407  printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2408  len, cmd->underflow);
2409  }
2410  } else {
2411  *lenptr = len = 0;
2412  reqlen = 12;
2413  }
2414 
2415  /* Stick the headers on */
2416  msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2417 
2418  // Send it on its way
2419  rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2420  if (rcode == 0) {
2421  return 0;
2422  }
2423  return rcode;
2424 }
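/*
 * Frame layout produced above, as a worked example: a single-segment
 * 4 KB DATA IN command on a 32-bit card becomes a 14-word message -
 * msg[6] = 0x60a00000|cmd_len (SCB flags), msg[7..10] the CDB,
 * msg[11] = 4096 (total length), msg[12] = 0xD0001000 (last simple
 * SG element, 4096 bytes) and msg[13] the segment's bus address.
 */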
2425 
2426 
2427 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2428 {
2429  struct Scsi_Host *host;
2430 
2431  host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2432  if (host == NULL) {
2433  printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2434  return -1;
2435  }
2436  host->hostdata[0] = (unsigned long)pHba;
2437  pHba->host = host;
2438 
2439  host->irq = pHba->pDev->irq;
2440  /* no IO ports, so don't have to set host->io_port and
2441  * host->n_io_port
2442  */
2443  host->io_port = 0;
2444  host->n_io_port = 0;
2445  /* see comments in scsi_host.h */
2446  host->max_id = 16;
2447  host->max_lun = 256;
2448  host->max_channel = pHba->top_scsi_channel + 1;
2449  host->cmd_per_lun = 1;
2450  host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2451  host->sg_tablesize = pHba->sg_tablesize;
2452  host->can_queue = pHba->post_fifo_size;
2453 
2454  return 0;
2455 }
2456 
2457 
2458 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2459 {
2460  adpt_hba* pHba;
2461  u32 hba_status;
2462  u32 dev_status;
2463  u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2464  // I know this would look cleaner if I just read bytes
2465  // but the model I have been using for all the rest of the
2466  // io is in 4 byte words - so I keep that model
2467  u16 detailed_status = readl(reply+16) &0xffff;
2468  dev_status = (detailed_status & 0xff);
2469  hba_status = detailed_status >> 8;
2470 
2471  // calculate resid for sg
2472  scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2473 
2474  pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2475 
2476  cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2477 
2478  if(!(reply_flags & MSG_FAIL)) {
2479  switch(detailed_status & I2O_SCSI_DSC_MASK) {
2480  case I2O_SCSI_DSC_SUCCESS:
2481  cmd->result = (DID_OK << 16);
2482  // handle underflow
2483  if (readl(reply+20) < cmd->underflow) {
2484  cmd->result = (DID_ERROR <<16);
2485  printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2486  }
2487  break;
2488  case I2O_SCSI_DSC_REQUEST_ABORTED:
2489  cmd->result = (DID_ABORT << 16);
2490  break;
2491  case I2O_SCSI_DSC_PATH_INVALID:
2492  case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2493  case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2494  case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2495  case I2O_SCSI_DSC_NO_ADAPTER:
2496  case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2497  printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2498  pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2499  cmd->result = (DID_TIME_OUT << 16);
2500  break;
2501  case I2O_SCSI_DSC_ADAPTER_BUSY:
2502  case I2O_SCSI_DSC_BUS_BUSY:
2503  cmd->result = (DID_BUS_BUSY << 16);
2504  break;
2505  case I2O_SCSI_DSC_SCSI_BUS_RESET:
2506  case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2507  cmd->result = (DID_RESET << 16);
2508  break;
2509  case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2510  printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2511  cmd->result = (DID_PARITY << 16);
2512  break;
2531  case I2O_SCSI_DSC_NO_NEXUS:
2536  default:
2537  printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2538  pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2539  hba_status, dev_status, cmd->cmnd[0]);
2540  cmd->result = (DID_ERROR << 16);
2541  break;
2542  }
2543 
2544  // copy over the request sense data if it was a check
2545  // condition status
2546  if (dev_status == SAM_STAT_CHECK_CONDITION) {
2547  u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2548  // Copy over the sense data
2549  memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2550  if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2551  cmd->sense_buffer[2] == DATA_PROTECT ){
2552  /* This is to handle an array failed */
2553  cmd->result = (DID_TIME_OUT << 16);
2554  printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2555  pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2556  hba_status, dev_status, cmd->cmnd[0]);
2557 
2558  }
2559  }
2560  } else {
2561  /* In this condition we could not talk to the tid -
2562  * the card rejected it. We should signal a retry
2563  * for a limited number of retries.
2564  */
2565  cmd->result = (DID_TIME_OUT << 16);
2566  printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2567  pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2568  ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2569  }
2570 
2571  cmd->result |= (dev_status);
2572 
2573  if(cmd->scsi_done != NULL){
2574  cmd->scsi_done(cmd);
2575  }
2576  return cmd->result;
2577 }
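/*
 * Decoding used above: the detailed status word at reply+16 carries
 * the SCSI device status in its low byte and the HBA status in the
 * next byte, while reply+20 holds the transfer count that feeds
 * scsi_set_resid(); sense data, when present, starts at reply+28.
 */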
2578 
2579 
2580 static s32 adpt_rescan(adpt_hba* pHba)
2581 {
2582  s32 rcode;
2583  ulong flags = 0;
2584 
2585  if(pHba->host)
2586  spin_lock_irqsave(pHba->host->host_lock, flags);
2587  if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2588  goto out;
2589  if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2590  goto out;
2591  rcode = 0;
2592 out: if(pHba->host)
2593  spin_unlock_irqrestore(pHba->host->host_lock, flags);
2594  return rcode;
2595 }
2596 
2597 
2598 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2599 {
2600  int i;
2601  int max;
2602  int tid;
2603  struct i2o_device *d;
2604  i2o_lct *lct = pHba->lct;
2605  u8 bus_no = 0;
2606  s16 scsi_id;
2607  s16 scsi_lun;
2608  u32 buf[10]; // at least 8 u32's
2609  struct adpt_device* pDev = NULL;
2610  struct i2o_device* pI2o_dev = NULL;
2611 
2612  if (lct == NULL) {
2613  printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2614  return -1;
2615  }
2616 
2617  max = lct->table_size;
2618  max -= 3;
2619  max /= 9;
2620 
2621  // Mark each drive as unscanned
2622  for (d = pHba->devices; d; d = d->next) {
2623  pDev =(struct adpt_device*) d->owner;
2624  if(!pDev){
2625  continue;
2626  }
2627  pDev->state |= DPTI_DEV_UNSCANNED;
2628  }
2629 
2630  printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2631 
2632  for(i=0;i<max;i++) {
2633  if( lct->lct_entry[i].user_tid != 0xfff){
2634  continue;
2635  }
2636 
2637  if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2638      lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2639      lct->lct_entry[i].class_id == I2O_CLASS_BUS_ADAPTER_PORT ){
2640  tid = lct->lct_entry[i].tid;
2641  if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2642  printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2643  continue;
2644  }
2645  bus_no = buf[0]>>16;
2646  if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2648  "%s: Channel number %d out of range\n",
2649  pHba->name, bus_no);
2650  continue;
2651  }
2652 
2653  scsi_id = buf[1];
2654  scsi_lun = (buf[2]>>8 )&0xff;
2655  pDev = pHba->channel[bus_no].device[scsi_id];
2656  /* find the matching lun in this target's chain */
2657  while(pDev) {
2658  if(pDev->scsi_lun == scsi_lun) {
2659  break;
2660  }
2661  pDev = pDev->next_lun;
2662  }
2663  if(!pDev ) { // Something new - add it
2664  d = kmalloc(sizeof(struct i2o_device),
2665  GFP_ATOMIC);
2666  if(d==NULL)
2667  {
2668  printk(KERN_CRIT "Out of memory for I2O device data.\n");
2669  return -ENOMEM;
2670  }
2671 
2672  d->controller = pHba;
2673  d->next = NULL;
2674 
2675  memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2676 
2677  d->flags = 0;
2678  adpt_i2o_report_hba_unit(pHba, d);
2679  adpt_i2o_install_device(pHba, d);
2680 
2681  pDev = pHba->channel[bus_no].device[scsi_id];
2682  if( pDev == NULL){
2683  pDev =
2684  kzalloc(sizeof(struct adpt_device),
2685  GFP_ATOMIC);
2686  if(pDev == NULL) {
2687  return -ENOMEM;
2688  }
2689  pHba->channel[bus_no].device[scsi_id] = pDev;
2690  } else {
2691  while (pDev->next_lun) {
2692  pDev = pDev->next_lun;
2693  }
2694  pDev = pDev->next_lun =
2695  kzalloc(sizeof(struct adpt_device),
2696  GFP_ATOMIC);
2697  if(pDev == NULL) {
2698  return -ENOMEM;
2699  }
2700  }
2701  pDev->tid = d->lct_data.tid;
2702  pDev->scsi_channel = bus_no;
2703  pDev->scsi_id = scsi_id;
2704  pDev->scsi_lun = scsi_lun;
2705  pDev->pI2o_dev = d;
2706  d->owner = pDev;
2707  pDev->type = (buf[0])&0xff;
2708  pDev->flags = (buf[0]>>8)&0xff;
2709  // Too late, the SCSI system has made up its mind, but what the hey ...
2710  if(scsi_id > pHba->top_scsi_id){
2711  pHba->top_scsi_id = scsi_id;
2712  }
2713  if(scsi_lun > pHba->top_scsi_lun){
2714  pHba->top_scsi_lun = scsi_lun;
2715  }
2716  continue;
2717  } // end of new i2o device
2718 
2719  // We found an old device - check it
2720  while(pDev) {
2721  if(pDev->scsi_lun == scsi_lun) {
2722  if(!scsi_device_online(pDev->pScsi_dev)) {
2723  printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2724  pHba->name,bus_no,scsi_id,scsi_lun);
2725  if (pDev->pScsi_dev) {
2726  scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2727  }
2728  }
2729  d = pDev->pI2o_dev;
2730  if(d->lct_data.tid != tid) { // something changed
2731  pDev->tid = tid;
2732  memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2733  if (pDev->pScsi_dev) {
2734  pDev->pScsi_dev->changed = TRUE;
2735  pDev->pScsi_dev->removable = TRUE;
2736  }
2737  }
2738  // Found it - mark it scanned
2739  pDev->state = DPTI_DEV_ONLINE;
2740  break;
2741  }
2742  pDev = pDev->next_lun;
2743  }
2744  }
2745  }
2746  for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2747  pDev =(struct adpt_device*) pI2o_dev->owner;
2748  if(!pDev){
2749  continue;
2750  }
2751  // Force offline any drives that previously existed but could not
2752  // be found in the LCT table
2753  if (pDev->state & DPTI_DEV_UNSCANNED){
2754  pDev->state = DPTI_DEV_OFFLINE;
2755  printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2756  if (pDev->pScsi_dev) {
2757  scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2758  }
2759  }
2760  }
2761  return 0;
2762 }
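/*
 * The rescan above is a mark-and-sweep: every known device is first
 * tagged DPTI_DEV_UNSCANNED, devices found while re-walking the LCT
 * are flipped back to DPTI_DEV_ONLINE (new ones are allocated on the
 * fly), and whatever is still unscanned afterwards is marked
 * DPTI_DEV_OFFLINE.
 */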
2763 
2764 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2765 {
2766  struct scsi_cmnd* cmd = NULL;
2767  struct scsi_device* d = NULL;
2768 
2769  shost_for_each_device(d, pHba->host) {
2770  unsigned long flags;
2771  spin_lock_irqsave(&d->list_lock, flags);
2772  list_for_each_entry(cmd, &d->cmd_list, list) {
2773  if(cmd->serial_number == 0){
2774  continue;
2775  }
2776  cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2777  cmd->scsi_done(cmd);
2778  }
2779  spin_unlock_irqrestore(&d->list_lock, flags);
2780  }
2781 }
2782 
2783 
2784 /*============================================================================
2785  * Routines from i2o subsystem
2786  *============================================================================
2787  */
2788 
2789 
2790 
2791 /*
2792  * Bring an I2O controller into HOLD state. See the spec.
2793  */
2794 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2795 {
2796  int rcode;
2797 
2798  if(pHba->initialized ) {
2799  if (adpt_i2o_status_get(pHba) < 0) {
2800  if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2801  printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2802  return rcode;
2803  }
2804  if (adpt_i2o_status_get(pHba) < 0) {
2805  printk(KERN_INFO "HBA not responding.\n");
2806  return -1;
2807  }
2808  }
2809 
2810  if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2811  printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2812  return -1;
2813  }
2814 
2815  if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2816  pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2817  pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2818  pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2819  adpt_i2o_reset_hba(pHba);
2820  if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2821  printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2822  return -1;
2823  }
2824  }
2825  } else {
2826  if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2827  printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2828  return rcode;
2829  }
2830 
2831  }
2832 
2833  if (adpt_i2o_init_outbound_q(pHba) < 0) {
2834  return -1;
2835  }
2836 
2837  /* In HOLD state */
2838 
2839  if (adpt_i2o_hrt_get(pHba) < 0) {
2840  return -1;
2841  }
2842 
2843  return 0;
2844 }
2845 
2846 /*
2847  * Bring a controller online into OPERATIONAL state.
2848  */
2849 
2850 static int adpt_i2o_online_hba(adpt_hba* pHba)
2851 {
2852  if (adpt_i2o_systab_send(pHba) < 0) {
2853  adpt_i2o_delete_hba(pHba);
2854  return -1;
2855  }
2856  /* In READY state */
2857 
2858  if (adpt_i2o_enable_hba(pHba) < 0) {
2859  adpt_i2o_delete_hba(pHba);
2860  return -1;
2861  }
2862 
2863  /* In OPERATIONAL state */
2864  return 0;
2865 }
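/*
 * Taken together with adpt_i2o_activate_hba() this walks the I2O IOP
 * state machine: reset/activate leaves the IOP in HOLD, SysTabSet
 * moves it to READY and SysEnable to OPERATIONAL, matching the
 * in-line state comments above.
 */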
2866 
2867 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2868 {
2869  u32 __iomem *msg;
2870  ulong timeout = jiffies + 5*HZ;
2871 
2872  while(m == EMPTY_QUEUE){
2873  rmb();
2874  m = readl(pHba->post_port);
2875  if(m != EMPTY_QUEUE){
2876  break;
2877  }
2878  if(time_after(jiffies,timeout)){
2879  printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2880  return 2;
2881  }
2882  schedule_timeout_uninterruptible(1);
2883  }
2884  msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2885  writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2886  writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2887  writel( 0,&msg[2]);
2888  wmb();
2889 
2890  writel(m, pHba->post_port);
2891  wmb();
2892  return 0;
2893 }
2894 
2895 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2896 {
2897  u8 *status;
2898  dma_addr_t addr;
2899  u32 __iomem *msg = NULL;
2900  int i;
2901  ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2902  u32 m;
2903 
2904  do {
2905  rmb();
2906  m = readl(pHba->post_port);
2907  if (m != EMPTY_QUEUE) {
2908  break;
2909  }
2910 
2911  if(time_after(jiffies,timeout)){
2912  printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2913  return -ETIMEDOUT;
2914  }
2915  schedule_timeout_uninterruptible(1);
2916  } while(m == EMPTY_QUEUE);
2917 
2918  msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2919 
2920  status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2921  if (!status) {
2922  adpt_send_nop(pHba, m);
2923  printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2924  pHba->name);
2925  return -ENOMEM;
2926  }
2927  memset(status, 0, 4);
2928 
2929  writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2930  writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2931  writel(0, &msg[2]);
2932  writel(0x0106, &msg[3]); /* Transaction context */
2933  writel(4096, &msg[4]); /* Host page frame size */
2934  writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2935  writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2936  writel((u32)addr, &msg[7]);
2937 
2938  writel(m, pHba->post_port);
2939  wmb();
2940 
2941  // Wait for the reply status to come back
2942  do {
2943  if (*status) {
2944  if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2945  break;
2946  }
2947  }
2948  rmb();
2949  if(time_after(jiffies,timeout)){
2950  printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2951  /* We lose 4 bytes of "status" here, but we
2952  cannot free them because the controller may
2953  wake up and corrupt those bytes at any time */
2954  /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2955  return -ETIMEDOUT;
2956  }
2957  schedule_timeout_uninterruptible(1);
2958  } while (1);
2959 
2960  // If the command was successful, fill the fifo with our reply
2961  // message packets
2962  if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2963  dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2964  return -2;
2965  }
2966  dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2967 
2968  if(pHba->reply_pool != NULL) {
2969  dma_free_coherent(&pHba->pDev->dev,
2970  pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2971  pHba->reply_pool, pHba->reply_pool_pa);
2972  }
2973 
2974  pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2975  pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2976  &pHba->reply_pool_pa, GFP_KERNEL);
2977  if (!pHba->reply_pool) {
2978  printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2979  return -ENOMEM;
2980  }
2981  memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2982 
2983  for(i = 0; i < pHba->reply_fifo_size; i++) {
2984  writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2985  pHba->reply_port);
2986  wmb();
2987  }
2988  adpt_i2o_status_get(pHba);
2989  return 0;
2990 }
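/*
 * The outbound-init handshake above polls a one-word status buffer:
 * 0x01 means the IOP is still working on it, 0x04 means complete.
 * Once the queue exists, the reply FIFO is primed by writing the bus
 * address of every REPLY_FRAME_SIZE*4-byte frame in the pool to
 * reply_port.
 */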
2991 
2992 
2993 /*
2994  * I2O System Table. Contains information about
2995  * all the IOPs in the system. Used to inform IOPs
2996  * about each other's existence.
2997  *
2998  * sys_tbl_ver is the CurrentChangeIndicator that is
2999  * used by IOPs to track changes.
3000  */
3001 
3002 
3003 
3004 static s32 adpt_i2o_status_get(adpt_hba* pHba)
3005 {
3006  ulong timeout;
3007  u32 m;
3008  u32 __iomem *msg;
3009  u8 *status_block=NULL;
3010 
3011  if(pHba->status_block == NULL) {
3012  pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
3013  sizeof(i2o_status_block),
3014  &pHba->status_block_pa, GFP_KERNEL);
3015  if(pHba->status_block == NULL) {
3017  "dpti%d: Get Status Block failed; Out of memory. \n",
3018  pHba->unit);
3019  return -ENOMEM;
3020  }
3021  }
3022  memset(pHba->status_block, 0, sizeof(i2o_status_block));
3023  status_block = (u8*)(pHba->status_block);
3024  timeout = jiffies+TMOUT_GETSTATUS*HZ;
3025  do {
3026  rmb();
3027  m = readl(pHba->post_port);
3028  if (m != EMPTY_QUEUE) {
3029  break;
3030  }
3031  if(time_after(jiffies,timeout)){
3032  printk(KERN_ERR "%s: Timeout waiting for message !\n",
3033  pHba->name);
3034  return -ETIMEDOUT;
3035  }
3036  schedule_timeout_uninterruptible(1);
3037  } while(m==EMPTY_QUEUE);
3038 
3039 
3040  msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3041 
3042  writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3043  writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3044  writel(1, &msg[2]);
3045  writel(0, &msg[3]);
3046  writel(0, &msg[4]);
3047  writel(0, &msg[5]);
3048  writel( dma_low(pHba->status_block_pa), &msg[6]);
3049  writel( dma_high(pHba->status_block_pa), &msg[7]);
3050  writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3051 
3052  //post message
3053  writel(m, pHba->post_port);
3054  wmb();
3055 
3056  while(status_block[87]!=0xff){
3057  if(time_after(jiffies,timeout)){
3058  printk(KERN_ERR"dpti%d: Get status timeout.\n",
3059  pHba->unit);
3060  return -ETIMEDOUT;
3061  }
3062  rmb();
3063  schedule_timeout_uninterruptible(1);
3064  }
3065 
3066  // Set up our number of outbound and inbound messages
3067  pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3068  if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3069  pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3070  }
3071 
3072  pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3073  if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3074  pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3075  }
3076 
3077  // Calculate the Scatter Gather list size
3078  if (dpt_dma64(pHba)) {
3079  pHba->sg_tablesize
3080  = ((pHba->status_block->inbound_frame_size * 4
3081  - 14 * sizeof(u32))
3082  / (sizeof(struct sg_simple_element) + sizeof(u32)));
3083  } else {
3084  pHba->sg_tablesize
3085  = ((pHba->status_block->inbound_frame_size * 4
3086  - 12 * sizeof(u32))
3087  / sizeof(struct sg_simple_element));
3088  }
3089  if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3090  pHba->sg_tablesize = SG_LIST_ELEMENTS;
3091  }
3092 
3093 
3094 #ifdef DEBUG
3095  printk("dpti%d: State = ",pHba->unit);
3096  switch(pHba->status_block->iop_state) {
3097  case 0x01:
3098  printk("INIT\n");
3099  break;
3100  case 0x02:
3101  printk("RESET\n");
3102  break;
3103  case 0x04:
3104  printk("HOLD\n");
3105  break;
3106  case 0x05:
3107  printk("READY\n");
3108  break;
3109  case 0x08:
3110  printk("OPERATIONAL\n");
3111  break;
3112  case 0x10:
3113  printk("FAILED\n");
3114  break;
3115  case 0x11:
3116  printk("FAULTED\n");
3117  break;
3118  default:
3119  printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3120  }
3121 #endif
3122  return 0;
3123 }
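/*
 * STATUS_GET completion is detected by polling byte 87 of the 88-byte
 * status block: the buffer is zeroed first, and the IOP writes 0xff
 * into the final byte once its DMA of the block is done.
 */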
3124 
3125 /*
3126  * Get the IOP's Logical Configuration Table
3127  */
3128 static int adpt_i2o_lct_get(adpt_hba* pHba)
3129 {
3130  u32 msg[8];
3131  int ret;
3132  u32 buf[16];
3133 
3134  if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3135  pHba->lct_size = pHba->status_block->expected_lct_size;
3136  }
3137  do {
3138  if (pHba->lct == NULL) {
3139  pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3140  pHba->lct_size, &pHba->lct_pa,
3141  GFP_ATOMIC);
3142  if(pHba->lct == NULL) {
3143  printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3144  pHba->name);
3145  return -ENOMEM;
3146  }
3147  }
3148  memset(pHba->lct, 0, pHba->lct_size);
3149 
3150  msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3151  msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3152  msg[2] = 0;
3153  msg[3] = 0;
3154  msg[4] = 0xFFFFFFFF; /* All devices */
3155  msg[5] = 0x00000000; /* Report now */
3156  msg[6] = 0xD0000000|pHba->lct_size;
3157  msg[7] = (u32)pHba->lct_pa;
3158 
3159  if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3160  printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3161  pHba->name, ret);
3162  printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3163  return ret;
3164  }
3165 
3166  if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3167  pHba->lct_size = pHba->lct->table_size << 2;
3168  dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3169  pHba->lct, pHba->lct_pa);
3170  pHba->lct = NULL;
3171  }
3172  } while (pHba->lct == NULL);
3173 
3174  PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3175 
3176 
3177  // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3178  if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3179  pHba->FwDebugBufferSize = buf[1];
3180  pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3181  pHba->FwDebugBufferSize);
3182  if (pHba->FwDebugBuffer_P) {
3183  pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3184  FW_DEBUG_FLAGS_OFFSET;
3185  pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3186  FW_DEBUG_BLED_OFFSET;
3187  pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3188  pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3189  FW_DEBUG_STR_LENGTH_OFFSET;
3190  pHba->FwDebugBuffer_P += buf[2];
3191  pHba->FwDebugFlags = 0;
3192  }
3193  }
3194 
3195  return 0;
3196 }
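/*
 * The LCT is fetched with a grow-and-retry loop: if the table the IOP
 * reports (table_size, in 32-bit words) is bigger than our buffer,
 * the buffer is freed, resized and the notify reissued until it fits.
 */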
3197 
3198 static int adpt_i2o_build_sys_table(void)
3199 {
3200  adpt_hba* pHba = hba_chain;
3201  int count = 0;
3202 
3203  if (sys_tbl)
3204  dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3205  sys_tbl, sys_tbl_pa);
3206 
3207  sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3208  (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3209 
3210  sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3211  sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3212  if (!sys_tbl) {
3213  printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3214  return -ENOMEM;
3215  }
3216  memset(sys_tbl, 0, sys_tbl_len);
3217 
3218  sys_tbl->num_entries = hba_count;
3219  sys_tbl->version = I2OVERSION;
3220  sys_tbl->change_ind = sys_tbl_ind++;
3221 
3222  for(pHba = hba_chain; pHba; pHba = pHba->next) {
3223  u64 addr;
3224  // Get updated Status Block so we have the latest information
3225  if (adpt_i2o_status_get(pHba)) {
3226  sys_tbl->num_entries--;
3227  continue; // try next one
3228  }
3229 
3230  sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3231  sys_tbl->iops[count].iop_id = pHba->unit + 2;
3232  sys_tbl->iops[count].seg_num = 0;
3233  sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3234  sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3235  sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3236  sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3237  sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3238  sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3239  addr = pHba->base_addr_phys + 0x40;
3240  sys_tbl->iops[count].inbound_low = dma_low(addr);
3241  sys_tbl->iops[count].inbound_high = dma_high(addr);
3242 
3243  count++;
3244  }
3245 
3246 #ifdef DEBUG
3247 {
3248  u32 *table = (u32*)sys_tbl;
3249  printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3250  for(count = 0; count < (sys_tbl_len >>2); count++) {
3251  printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3252  count, table[count]);
3253  }
3254 }
3255 #endif
3256 
3257  return 0;
3258 }
3259 
3260 
3261 /*
3262  * Dump the information block associated with a given unit (TID)
3263  */
3264 
3265 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3266 {
3267  char buf[64];
3268  int unit = d->lct_data.tid;
3269 
3270  printk(KERN_INFO "TID %3.3d ", unit);
3271 
3272  if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3273  {
3274  buf[16]=0;
3275  printk(" Vendor: %-12.12s", buf);
3276  }
3277  if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3278  {
3279  buf[16]=0;
3280  printk(" Device: %-12.12s", buf);
3281  }
3282  if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3283  {
3284  buf[8]=0;
3285  printk(" Rev: %-12.12s\n", buf);
3286  }
3287 #ifdef DEBUG
3288  printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3289  printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3290  printk(KERN_INFO "\tFlags: ");
3291 
3292  if(d->lct_data.device_flags&(1<<0))
3293  printk("C"); // ConfigDialog requested
3294  if(d->lct_data.device_flags&(1<<1))
3295  printk("U"); // Multi-user capable
3296  if(!(d->lct_data.device_flags&(1<<4)))
3297  printk("P"); // Peer service enabled!
3298  if(!(d->lct_data.device_flags&(1<<5)))
3299  printk("M"); // Mgmt service enabled!
3300  printk("\n");
3301 #endif
3302 }
3303 
3304 #ifdef DEBUG
3305 /*
3306  * Do i2o class name lookup
3307  */
3308 static const char *adpt_i2o_get_class_name(int class)
3309 {
3310  int idx = 16;
3311  static char *i2o_class_name[] = {
3312  "Executive",
3313  "Device Driver Module",
3314  "Block Device",
3315  "Tape Device",
3316  "LAN Interface",
3317  "WAN Interface",
3318  "Fibre Channel Port",
3319  "Fibre Channel Device",
3320  "SCSI Device",
3321  "ATE Port",
3322  "ATE Device",
3323  "Floppy Controller",
3324  "Floppy Device",
3325  "Secondary Bus Port",
3326  "Peer Transport Agent",
3327  "Peer Transport",
3328  "Unknown"
3329  };
3330 
3331  switch(class&0xFFF) {
3332  case I2O_CLASS_EXECUTIVE:
3333  idx = 0; break;
3334  case I2O_CLASS_DDM:
3335  idx = 1; break;
3336  case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3337  idx = 2; break;
3338  case I2O_CLASS_SEQUENTIAL_STORAGE:
3339  idx = 3; break;
3340  case I2O_CLASS_LAN:
3341  idx = 4; break;
3342  case I2O_CLASS_WAN:
3343  idx = 5; break;
3344  case I2O_CLASS_FIBRE_CHANNEL_PORT:
3345  idx = 6; break;
3346  case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3347  idx = 7; break;
3348  case I2O_CLASS_SCSI_PERIPHERAL:
3349  idx = 8; break;
3350  case I2O_CLASS_ATE_PORT:
3351  idx = 9; break;
3352  case I2O_CLASS_ATE_PERIPHERAL:
3353  idx = 10; break;
3354  case I2O_CLASS_FLOPPY_CONTROLLER:
3355  idx = 11; break;
3356  case I2O_CLASS_FLOPPY_DEVICE:
3357  idx = 12; break;
3358  case I2O_CLASS_BUS_ADAPTER_PORT:
3359  idx = 13; break;
3360  case I2O_CLASS_PEER_TRANSPORT_AGENT:
3361  idx = 14; break;
3362  case I2O_CLASS_PEER_TRANSPORT:
3363  idx = 15; break;
3364  }
3365  return i2o_class_name[idx];
3366 }
3367 #endif
3368 
3369 
3370 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3371 {
3372  u32 msg[6];
3373  int ret, size = sizeof(i2o_hrt);
3374 
3375  do {
3376  if (pHba->hrt == NULL) {
3377  pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3378  size, &pHba->hrt_pa, GFP_KERNEL);
3379  if (pHba->hrt == NULL) {
3380  printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3381  return -ENOMEM;
3382  }
3383  }
3384 
3385  msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3386  msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3387  msg[2]= 0;
3388  msg[3]= 0;
3389  msg[4]= (0xD0000000 | size); /* Simple transaction */
3390  msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3391 
3392  if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3393  printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3394  return ret;
3395  }
3396 
3397  if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3398  int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3399  dma_free_coherent(&pHba->pDev->dev, size,
3400  pHba->hrt, pHba->hrt_pa);
3401  size = newsize;
3402  pHba->hrt = NULL;
3403  }
3404  } while(pHba->hrt == NULL);
3405  return 0;
3406 }
3407 
3408 /*
3409  * Query one scalar group value or a whole scalar group.
3410  */
3411 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3412  int group, int field, void *buf, int buflen)
3413 {
3414  u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3415  u8 *opblk_va;
3416  dma_addr_t opblk_pa;
3417  u8 *resblk_va;
3418  dma_addr_t resblk_pa;
3419 
3420  int size;
3421 
3422  /* 8 bytes for header */
3423  resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3424  sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3425  if (resblk_va == NULL) {
3426  printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3427  return -ENOMEM;
3428  }
3429 
3430  opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3431  sizeof(opblk), &opblk_pa, GFP_KERNEL);
3432  if (opblk_va == NULL) {
3433  dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3434  resblk_va, resblk_pa);
3435  printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3436  pHba->name);
3437  return -ENOMEM;
3438  }
3439  if (field == -1) /* whole group */
3440  opblk[4] = -1;
3441 
3442  memcpy(opblk_va, opblk, sizeof(opblk));
3443  size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3444  opblk_va, opblk_pa, sizeof(opblk),
3445  resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3446  dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3447  if (size == -ETIME) {
3448  dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3449  resblk_va, resblk_pa);
3450  printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3451  return -ETIME;
3452  } else if (size == -EINTR) {
3453  dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3454  resblk_va, resblk_pa);
3455  printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3456  return -EINTR;
3457  }
3458 
3459  memcpy(buf, resblk_va+8, buflen); /* cut off header */
3460 
3461  dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3462  resblk_va, resblk_pa);
3463  if (size < 0)
3464  return size;
3465 
3466  return buflen;
3467 }
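/*
 * The operation block built above - { 1, 0, I2O_PARAMS_FIELD_GET,
 * group, 1, field } - requests one FIELD_GET operation returning a
 * single field; writing -1 into the field-count slot instead asks
 * for the whole scalar group.
 */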
3468 
3469 
3470 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3471  *
3472  * This function can be used for all UtilParamsGet/Set operations.
3473  * The OperationBlock is given in opblk-buffer,
3474  * and results are returned in resblk-buffer.
3475  * Note that the minimum sized resblk is 8 bytes and contains
3476  * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3477  */
3478 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3479  void *opblk_va, dma_addr_t opblk_pa, int oplen,
3480  void *resblk_va, dma_addr_t resblk_pa, int reslen)
3481 {
3482  u32 msg[9];
3483  u32 *res = (u32 *)resblk_va;
3484  int wait_status;
3485 
3486  msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3487  msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3488  msg[2] = 0;
3489  msg[3] = 0;
3490  msg[4] = 0;
3491  msg[5] = 0x54000000 | oplen; /* OperationBlock */
3492  msg[6] = (u32)opblk_pa;
3493  msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3494  msg[8] = (u32)resblk_pa;
3495 
3496  if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3497  printk(KERN_WARNING"adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3498  return wait_status; /* -DetailedStatus */
3499  }
3500 
3501  if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3502  printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3503  "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3504  pHba->name,
3505  (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3506  : "PARAMS_GET",
3507  res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3508  return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3509  }
3510 
3511  return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3512 }
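/*
 * Result header layout checked above: res[1] packs ErrorInfoSize in
 * bits 31-24, BlockStatus in bits 23-16 and BlockSize (in 32-bit
 * words) in bits 15-0, so the byte count returned on success is
 * 4 + (BlockSize << 2).
 */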
3513 
3514 
3515 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3516 {
3517  u32 msg[4];
3518  int ret;
3519 
3520  adpt_i2o_status_get(pHba);
3521 
3522  /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3523 
3524  if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3525  (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
3526  return 0;
3527  }
3528 
3529  msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3530  msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3531  msg[2] = 0;
3532  msg[3] = 0;
3533 
3534  if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3535  printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3536  pHba->unit, -ret);
3537  } else {
3538  printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3539  }
3540 
3541  adpt_i2o_status_get(pHba);
3542  return ret;
3543 }
3544 
3545 
3546 /*
3547  * Enable IOP. Allows the IOP to resume external operations.
3548  */
3549 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3550 {
3551  u32 msg[4];
3552  int ret;
3553 
3554  adpt_i2o_status_get(pHba);
3555  if(!pHba->status_block){
3556  return -ENOMEM;
3557  }
3558  /* Enable only allowed on READY state */
3559  if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3560  return 0;
3561 
3562  if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3563  return -EINVAL;
3564 
3565  msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3566  msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3567  msg[2]= 0;
3568  msg[3]= 0;
3569 
3570  if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3571  printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3572  pHba->name, ret);
3573  } else {
3574  PDEBUG("%s: Enabled.\n", pHba->name);
3575  }
3576 
3577  adpt_i2o_status_get(pHba);
3578  return ret;
3579 }
3580 
3581 
3582 static int adpt_i2o_systab_send(adpt_hba* pHba)
3583 {
3584  u32 msg[12];
3585  int ret;
3586 
3587  msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3588  msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3589  msg[2] = 0;
3590  msg[3] = 0;
3591  msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3592  msg[5] = 0; /* Segment 0 */
3593 
3594  /*
3595  * Provide three SGL-elements:
3596  * System table (SysTab), Private memory space declaration and
3597  * Private i/o space declaration
3598  */
3599  msg[6] = 0x54000000 | sys_tbl_len;
3600  msg[7] = (u32)sys_tbl_pa;
3601  msg[8] = 0x54000000 | 0;
3602  msg[9] = 0;
3603  msg[10] = 0xD4000000 | 0;
3604  msg[11] = 0;
3605 
3606  if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3607  printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3608  pHba->name, ret);
3609  }
3610 #ifdef DEBUG
3611  else {
3612  PINFO("%s: SysTab set.\n", pHba->name);
3613  }
3614 #endif
3615 
3616  return ret;
3617  }
3618 
3619 
3620 /*============================================================================
3621  *
3622  *============================================================================
3623  */
3624 
3625 
3626 #ifdef UARTDELAY
3627 
3628 static void adpt_delay(int millisec)
3629 {
3630  int i;
3631  for (i = 0; i < millisec; i++) {
3632  udelay(1000); /* delay for one millisecond */
3633  }
3634 }
3635 
3636 #endif
3637 
3638 static struct scsi_host_template driver_template = {
3639  .module = THIS_MODULE,
3640  .name = "dpt_i2o",
3641  .proc_name = "dpt_i2o",
3642  .proc_info = adpt_proc_info,
3643  .info = adpt_info,
3644  .queuecommand = adpt_queue,
3645  .eh_abort_handler = adpt_abort,
3646  .eh_device_reset_handler = adpt_device_reset,
3647  .eh_bus_reset_handler = adpt_bus_reset,
3648  .eh_host_reset_handler = adpt_reset,
3649  .bios_param = adpt_bios_param,
3650  .slave_configure = adpt_slave_configure,
3651  .can_queue = MAX_TO_IOP_MESSAGES,
3652  .this_id = 7,
3653  .cmd_per_lun = 1,
3654  .use_clustering = ENABLE_CLUSTERING,
3655 };
3656 
3657 static int __init adpt_init(void)
3658 {
3659  int error;
3660  adpt_hba *pHba, *next;
3661 
3662  printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3663 
3664  error = adpt_detect(&driver_template);
3665  if (error < 0)
3666  return error;
3667  if (hba_chain == NULL)
3668  return -ENODEV;
3669 
3670  for (pHba = hba_chain; pHba; pHba = pHba->next) {
3671  error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3672  if (error)
3673  goto fail;
3674  scsi_scan_host(pHba->host);
3675  }
3676  return 0;
3677 fail:
3678  for (pHba = hba_chain; pHba; pHba = next) {
3679  next = pHba->next;
3680  scsi_remove_host(pHba->host);
3681  }
3682  return error;
3683 }
3684 
3685 static void __exit adpt_exit(void)
3686 {
3687  adpt_hba *pHba, *next;
3688 
3689  for (pHba = hba_chain; pHba; pHba = pHba->next)
3690  scsi_remove_host(pHba->host);
3691  for (pHba = hba_chain; pHba; pHba = next) {
3692  next = pHba->next;
3693  adpt_release(pHba->host);
3694  }
3695 }
3696 
3697 module_init(adpt_init);
3698 module_exit(adpt_exit);
3699 
3700 MODULE_LICENSE("GPL");