Linux Kernel  3.7.1
53c700.c
1 /* -*- mode: c; c-basic-offset: 8 -*- */
2 
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
4  *
5  * Copyright (C) 2001 by [email protected]
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
12 **
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
17 **
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 **
22 **-----------------------------------------------------------------------------
23  */
24 
25 /* Notes:
26  *
27  * This driver is designed exclusively for these chips (virtually the
28  * earliest of the scripts engine chips). They need their own drivers
29  * because they are missing so many of the scripts and snazzy register
30  * features of their elder brothers (the 710, 720 and 770).
31  *
32  * The 700 is the lowliest of the line; it can only do async SCSI.
33  * The 700-66 can at least do synchronous SCSI up to 10MHz.
34  *
35  * The 700 chip has no host bus interface logic of its own. However,
36  * it is usually mapped to a location with well defined register
37  * offsets. Therefore, if you can determine the base address and the
38  * irq your board incorporating this chip uses, you can probably use
39  * this driver to run it (although you'll probably have to write a
40  * minimal wrapper for the purpose---see the NCR_D700 driver for
41  * details about how to do this).
42  *
43  *
44  * TODO List:
45  *
46  * 1. Better statistics in the proc fs
47  *
48  * 2. Implement message queue (queues SCSI messages like commands) and make
49  * the abort and device reset functions use them.
50  * */
51 
52 /* CHANGELOG
53  *
54  * Version 2.8
55  *
56  * Fixed bad bug affecting tag starvation processing (previously the
57  * driver would hang the system if too many tags starved). Also fixed
58  * bad bug having to do with 10 byte command processing and REQUEST
59  * SENSE (the command would loop forever getting a transfer length
60  * mismatch in the CMD phase).
61  *
62  * Version 2.7
63  *
64  * Fixed scripts problem which caused certain devices (notably CDRWs)
65  * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66  * __raw_readl/writel for parisc compatibility (Thomas
67  * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68  * for sense requests (Ryan Bradetich).
69  *
70  * Version 2.6
71  *
72  * Following test of the 64 bit parisc kernel by Richard Hirst,
73  * several problems have now been corrected. Also adds support for
74  * consistent memory allocation.
75  *
76  * Version 2.5
77  *
78  * More compatibility changes for 710 (now actually works). Enhanced
79  * support for odd clock speeds which constrain SDTR negotiations.
80  * Correct cacheline separation for scsi messages and status for
81  * incoherent architectures. Use of the pci mapping functions on
82  * buffers to begin support for 64 bit drivers.
83  *
84  * Version 2.4
85  *
86  * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87  * special 53c710 instructions or registers are used).
88  *
89  * Version 2.3
90  *
91  * More endianness/cache coherency changes.
92  *
93  * Better bad device handling (handles devices lying about tag
94  * queueing support and devices which fail to provide sense data on
95  * contingent allegiance conditions)
96  *
97  * Many thanks to Richard Hirst <[email protected]> for patiently
98  * debugging this driver on the parisc architecture and suggesting
99  * many improvements and bug fixes.
100  *
101  * Thanks also go to Linuxcare Inc. for providing several PARISC
102  * machines for me to debug the driver on.
103  *
104  * Version 2.2
105  *
106  * Made the driver mem or io mapped; added endian invariance; added
107  * dma cache flushing operations for architectures which need it;
108  * added support for more varied clocking speeds.
109  *
110  * Version 2.1
111  *
112  * Initial modularisation from the D700. See NCR_D700.c for the rest of
113  * the changelog.
114  * */
115 #define NCR_700_VERSION "2.8"
116 
117 #include <linux/kernel.h>
118 #include <linux/types.h>
119 #include <linux/string.h>
120 #include <linux/slab.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/init.h>
126 #include <linux/proc_fs.h>
127 #include <linux/blkdev.h>
128 #include <linux/module.h>
129 #include <linux/interrupt.h>
130 #include <linux/device.h>
131 #include <asm/dma.h>
132 #include <asm/io.h>
133 #include <asm/pgtable.h>
134 #include <asm/byteorder.h>
135 
136 #include <scsi/scsi.h>
137 #include <scsi/scsi_cmnd.h>
138 #include <scsi/scsi_dbg.h>
139 #include <scsi/scsi_eh.h>
140 #include <scsi/scsi_host.h>
141 #include <scsi/scsi_tcq.h>
142 #include <scsi/scsi_transport.h>
143 #include <scsi/scsi_transport_spi.h>
144 
145 #include "53c700.h"
146 
147 /* NOTE: For 64 bit drivers there are points in the code where we use
148  * a non dereferenceable pointer to point to a structure in dma-able
149  * memory (which is 32 bits) so that we can use all of the structure
150  * operations but take the address at the end. This macro allows us
151  * to truncate the 64 bit pointer down to 32 bits without the compiler
152  * complaining */
153 #define to32bit(x) ((__u32)((unsigned long)(x)))
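/* Editorial illustration (not part of the original driver source): a typical
 * use of to32bit() is handing a DMA-visible address to the script patcher,
 * for example
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 * as seen later in this file; the truncated value is only ever handed back
 * to the chip, never dereferenced by the CPU. */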
154 
155 #ifdef NCR_700_DEBUG
156 #define STATIC
157 #else
158 #define STATIC static
159 #endif
160 
161 MODULE_AUTHOR("James Bottomley");
162 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
163 MODULE_LICENSE("GPL");
164 
165 /* This is the script */
166 #include "53c700_d.h"
167 
168 
169 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
170 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
173 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
174 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
179 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
180 
181 STATIC struct device_attribute *NCR_700_dev_attrs[];
182 
183 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
184 
185 static char *NCR_700_phase[] = {
186  "",
187  "after selection",
188  "before command phase",
189  "after command phase",
190  "after status phase",
191  "after data in phase",
192  "after data out phase",
193  "during data phase",
194 };
195 
196 static char *NCR_700_condition[] = {
197  "",
198  "NOT MSG_OUT",
199  "UNEXPECTED PHASE",
200  "NOT MSG_IN",
201  "UNEXPECTED MSG",
202  "MSG_IN",
203  "SDTR_MSG RECEIVED",
204  "REJECT_MSG RECEIVED",
205  "DISCONNECT_MSG RECEIVED",
206  "MSG_OUT",
207  "DATA_IN",
208 
209 };
210 
211 static char *NCR_700_fatal_messages[] = {
212  "unexpected message after reselection",
213  "still MSG_OUT after message injection",
214  "not MSG_IN after selection",
215  "Illegal message length received",
216 };
217 
218 static char *NCR_700_SBCL_bits[] = {
219  "IO ",
220  "CD ",
221  "MSG ",
222  "ATN ",
223  "SEL ",
224  "BSY ",
225  "ACK ",
226  "REQ ",
227 };
228 
229 static char *NCR_700_SBCL_to_phase[] = {
230  "DATA_OUT",
231  "DATA_IN",
232  "CMD_OUT",
233  "STATE",
234  "ILLEGAL PHASE",
235  "ILLEGAL PHASE",
236  "MSG OUT",
237  "MSG IN",
238 };
239 
240 /* This translates the SDTR message offset and period to a value
241  * which can be loaded into the SXFER_REG.
242  *
243  * NOTE: According to SCSI-2, the true transfer period (in ns) is
244  * actually four times this period value */
245 static inline __u8
246 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
247  __u8 offset, __u8 period)
248 {
249  int XFERP;
250 
251  __u8 min_xferp = (hostdata->chip710
252  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
253  __u8 max_offset = (hostdata->chip710
254  ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
255 
256  if(offset == 0)
257  return 0;
258 
259  if(period < hostdata->min_period) {
260  printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
261  period = hostdata->min_period;
262  }
263  XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
264  if(offset > max_offset) {
265  printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
266  offset, max_offset);
267  offset = max_offset;
268  }
269  if(XFERP < min_xferp) {
270  XFERP = min_xferp;
271  }
272  return (offset & 0x0f) | (XFERP & 0x07)<<4;
273 }
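/* Editorial worked example (assumed figures, not from the original source):
 * with hostdata->sync_clock = 50 (MHz) and an SDTR period factor of 50
 * (a 200ns true period, since the factor is in units of 4ns):
 *	XFERP = (50*4 * 50)/1000 - 4 = 6
 * so with an offset of 8 the routine above returns
 *	(8 & 0x0f) | (6 & 0x07)<<4 = 0x68
 */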
274 
275 static inline __u8
276 NCR_700_get_SXFER(struct scsi_device *SDp)
277 {
278  struct NCR_700_Host_Parameters *hostdata =
279  (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
280 
281  return NCR_700_offset_period_to_sxfer(hostdata,
282  spi_offset(SDp->sdev_target),
283  spi_period(SDp->sdev_target));
284 }
285 
286 struct Scsi_Host *
287 NCR_700_detect(struct scsi_host_template *tpnt,
288  struct NCR_700_Host_Parameters *hostdata, struct device *dev)
289 {
290  dma_addr_t pScript, pSlots;
291  __u8 *memory;
292  __u32 *script;
293  struct Scsi_Host *host;
294  static int banner = 0;
295  int j;
296 
297  if(tpnt->sdev_attrs == NULL)
298  tpnt->sdev_attrs = NCR_700_dev_attrs;
299 
300  memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
301  &pScript, GFP_KERNEL);
302  if(memory == NULL) {
303  printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
304  return NULL;
305  }
306 
307  script = (__u32 *)memory;
308  hostdata->msgin = memory + MSGIN_OFFSET;
309  hostdata->msgout = memory + MSGOUT_OFFSET;
310  hostdata->status = memory + STATUS_OFFSET;
311  hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
312  hostdata->dev = dev;
313 
314  pSlots = pScript + SLOTS_OFFSET;
315 
316  /* Fill in the missing routines from the host template */
317  tpnt->queuecommand = NCR_700_queuecommand;
318  tpnt->eh_abort_handler = NCR_700_abort;
319  tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
320  tpnt->eh_host_reset_handler = NCR_700_host_reset;
321  tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
322  tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
323  tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
324  tpnt->use_clustering = ENABLE_CLUSTERING;
325  tpnt->slave_configure = NCR_700_slave_configure;
326  tpnt->slave_destroy = NCR_700_slave_destroy;
327  tpnt->slave_alloc = NCR_700_slave_alloc;
328  tpnt->change_queue_depth = NCR_700_change_queue_depth;
329  tpnt->change_queue_type = NCR_700_change_queue_type;
330 
331  if(tpnt->name == NULL)
332  tpnt->name = "53c700";
333  if(tpnt->proc_name == NULL)
334  tpnt->proc_name = "53c700";
335 
336  host = scsi_host_alloc(tpnt, 4);
337  if (!host)
338  return NULL;
339  memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
340  * NCR_700_COMMAND_SLOTS_PER_HOST);
341  for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
342  dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
343  - (unsigned long)&hostdata->slots[0].SG[0]);
344  hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
345  if(j == 0)
346  hostdata->free_list = &hostdata->slots[j];
347  else
348  hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
349  hostdata->slots[j].state = NCR_700_SLOT_FREE;
350  }
351 
352  for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
353  script[j] = bS_to_host(SCRIPT[j]);
354 
355  /* adjust all labels to be bus physical */
356  for (j = 0; j < PATCHES; j++)
357  script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
358  /* now patch up fixed addresses. */
359  script_patch_32(hostdata->dev, script, MessageLocation,
360  pScript + MSGOUT_OFFSET);
361  script_patch_32(hostdata->dev, script, StatusAddress,
362  pScript + STATUS_OFFSET);
363  script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
364  pScript + MSGIN_OFFSET);
365 
366  hostdata->script = script;
367  hostdata->pScript = pScript;
368  dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
369  hostdata->state = NCR_700_HOST_FREE;
370  hostdata->cmd = NULL;
371  host->max_id = 8;
372  host->max_lun = NCR_700_MAX_LUNS;
373  BUG_ON(NCR_700_transport_template == NULL);
374  host->transportt = NCR_700_transport_template;
375  host->unique_id = (unsigned long)hostdata->base;
376  hostdata->eh_complete = NULL;
377  host->hostdata[0] = (unsigned long)hostdata;
378  /* kick the chip */
379  NCR_700_writeb(0xff, host, CTEST9_REG);
380  if (hostdata->chip710)
381  hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
382  else
383  hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
384  hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
385  if (banner == 0) {
386  printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By [email protected]\n");
387  banner = 1;
388  }
389  printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
390  hostdata->chip710 ? "53c710" :
391  (hostdata->fast ? "53c700-66" : "53c700"),
392  hostdata->rev, hostdata->differential ?
393  "(Differential)" : "");
394  /* reset the chip */
395  NCR_700_chip_reset(host);
396 
397  if (scsi_add_host(host, dev)) {
398  dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
399  scsi_host_put(host);
400  return NULL;
401  }
402 
403  spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
404  SPI_SIGNAL_SE;
405 
406  return host;
407 }
408 
409 int
410 NCR_700_release(struct Scsi_Host *host)
411 {
412  struct NCR_700_Host_Parameters *hostdata =
413  (struct NCR_700_Host_Parameters *)host->hostdata[0];
414 
415  dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
416  hostdata->script, hostdata->pScript);
417  return 1;
418 }
419 
420 static inline __u8
421 NCR_700_identify(int can_disconnect, __u8 lun)
422 {
423  return IDENTIFY_BASE |
424  ((can_disconnect) ? 0x40 : 0) |
425  (lun & NCR_700_LUN_MASK);
426 }
427 
428 /*
429  * Function : static int data_residual (Scsi_Host *host)
430  *
431  * Purpose : return residual data count of what's in the chip. If you
432  * really want to know what this function is doing, it's almost a
433  * direct transcription of the algorithm described in the 53c710
434  * guide, except that the DBC and DFIFO registers are only 6 bits
435  * wide on a 53c700.
436  *
437  * Inputs : host - SCSI host */
438 static inline int
439 NCR_700_data_residual (struct Scsi_Host *host) {
440  struct NCR_700_Host_Parameters *hostdata =
441  (struct NCR_700_Host_Parameters *)host->hostdata[0];
442  int count, synchronous = 0;
443  unsigned int ddir;
444 
445  if(hostdata->chip710) {
446  count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
447  (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
448  } else {
449  count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
450  (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
451  }
452 
453  if(hostdata->fast)
454  synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
455 
456  /* get the data direction */
457  ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
458 
459  if (ddir) {
460  /* Receive */
461  if (synchronous)
462  count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
463  else
464  if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
465  ++count;
466  } else {
467  /* Send */
468  __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
469  if (sstat & SODL_REG_FULL)
470  ++count;
471  if (synchronous && (sstat & SODR_REG_FULL))
472  ++count;
473  }
474 #ifdef NCR_700_DEBUG
475  if(count)
476  printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
477 #endif
478  return count;
479 }
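/* Editorial worked example (assumed register values, not from the original
 * source): on a 53c710 with DFIFO_REG = 0x12 and the low byte of DBC_REG =
 * 0x0e, the count of bytes still in the DMA FIFO is
 *	(0x12 - 0x0e) & 0x7f = 4
 * to which the SCSI-side latches (SIDL/SODL/SODR) may add one or two more,
 * as computed above. */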
480 
481 /* print out the SCSI wires and corresponding phase from the SBCL register
482  * in the chip */
483 static inline char *
484 sbcl_to_string(__u8 sbcl)
485 {
486  int i;
487  static char ret[256];
488 
489  ret[0]='\0';
490  for(i=0; i<8; i++) {
491  if((1<<i) & sbcl)
492  strcat(ret, NCR_700_SBCL_bits[i]);
493  }
494  strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
495  return ret;
496 }
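/* Editorial worked example (not from the original source): an SBCL value of
 * 0x07 has the IO, CD and MSG wires asserted, so sbcl_to_string() returns
 * "IO CD MSG MSG IN" (the asserted signal names followed by the decoded
 * phase). */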
497 
498 static inline __u8
499 bitmap_to_number(__u8 bitmap)
500 {
501  __u8 i;
502 
503  for(i=0; i<8 && !(bitmap &(1<<i)); i++)
504  ;
505  return i;
506 }
507 
508 /* Pull a slot off the free list */
509 STATIC struct NCR_700_command_slot *
510 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
511 {
512  struct NCR_700_command_slot *slot = hostdata->free_list;
513 
514  if(slot == NULL) {
515  /* sanity check */
516  if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
517  printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
518  return NULL;
519  }
520 
521  if(slot->state != NCR_700_SLOT_FREE)
522  /* should panic! */
523  printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
524 
525 
526  hostdata->free_list = slot->ITL_forw;
527  slot->ITL_forw = NULL;
528 
529 
530  /* NOTE: set the state to busy here, not queued, since this
531  * indicates the slot is in use and cannot be run by the IRQ
532  * finish routine. If we cannot queue the command when it
533  * is properly built, we then change to NCR_700_SLOT_QUEUED */
534  slot->state = NCR_700_SLOT_BUSY;
535  slot->flags = 0;
536  hostdata->command_slot_count++;
537 
538  return slot;
539 }
540 
541 STATIC void
542 free_slot(struct NCR_700_command_slot *slot,
543  struct NCR_700_Host_Parameters *hostdata)
544 {
545  if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
546  printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
547  }
548  if(slot->state == NCR_700_SLOT_FREE) {
549  printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
550  }
551 
552  slot->resume_offset = 0;
553  slot->cmnd = NULL;
554  slot->state = NCR_700_SLOT_FREE;
555  slot->ITL_forw = hostdata->free_list;
556  hostdata->free_list = slot;
557  hostdata->command_slot_count--;
558 }
559 
560 
561 /* This routine really does very little. The command is indexed on
562  the ITL and (if tagged) the ITLQ lists in _queuecommand */
563 STATIC void
564 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
565  struct scsi_cmnd *SCp, __u32 dsp)
566 {
567  /* It's just possible that this gets executed twice */
568  if(SCp != NULL) {
569  struct NCR_700_command_slot *slot =
570  (struct NCR_700_command_slot *)SCp->host_scribble;
571 
572  slot->resume_offset = dsp;
573  }
574  hostdata->state = NCR_700_HOST_FREE;
575  hostdata->cmd = NULL;
576 }
577 
578 STATIC inline void
579 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
580  struct NCR_700_command_slot *slot)
581 {
582  if(SCp->sc_data_direction != DMA_NONE &&
583  SCp->sc_data_direction != DMA_BIDIRECTIONAL)
584  scsi_dma_unmap(SCp);
585 }
586 
587 STATIC inline void
588 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
589  struct scsi_cmnd *SCp, int result)
590 {
591  hostdata->state = NCR_700_HOST_FREE;
592  hostdata->cmd = NULL;
593 
594  if(SCp != NULL) {
595  struct NCR_700_command_slot *slot =
596  (struct NCR_700_command_slot *)SCp->host_scribble;
597 
598  dma_unmap_single(hostdata->dev, slot->pCmd,
599  MAX_COMMAND_SIZE, DMA_TO_DEVICE);
600  if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
601  char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
602 #ifdef NCR_700_DEBUG
603  printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
604  SCp, SCp->cmnd[7], result);
605  scsi_print_sense("53c700", SCp);
606 
607 #endif
608  dma_unmap_single(hostdata->dev, slot->dma_handle,
609  SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
610  /* restore the old result if the request sense was
611  * successful */
612  if (result == 0)
613  result = cmnd[7];
614  /* restore the original length */
615  SCp->cmd_len = cmnd[8];
616  } else
617  NCR_700_unmap(hostdata, SCp, slot);
618 
619  free_slot(slot, hostdata);
620 #ifdef NCR_700_DEBUG
621  if(NCR_700_get_depth(SCp->device) == 0 ||
622  NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
623  printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
624  NCR_700_get_depth(SCp->device));
625 #endif /* NCR_700_DEBUG */
626  NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
627 
628  SCp->host_scribble = NULL;
629  SCp->result = result;
630  SCp->scsi_done(SCp);
631  } else {
632  printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
633  }
634 }
635 
636 
637 STATIC void
638 NCR_700_internal_bus_reset(struct Scsi_Host *host)
639 {
640  /* Bus reset */
641  NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
642  udelay(50);
643  NCR_700_writeb(0, host, SCNTL1_REG);
644 
645 }
646 
647 STATIC void
648 NCR_700_chip_setup(struct Scsi_Host *host)
649 {
650  struct NCR_700_Host_Parameters *hostdata =
651  (struct NCR_700_Host_Parameters *)host->hostdata[0];
652  __u8 min_period;
653  __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
654 
655  if(hostdata->chip710) {
656  __u8 burst_disable = 0;
657  __u8 burst_length = 0;
658 
659  switch (hostdata->burst_length) {
660  case 1:
661  burst_length = BURST_LENGTH_1;
662  break;
663  case 2:
664  burst_length = BURST_LENGTH_2;
665  break;
666  case 4:
667  burst_length = BURST_LENGTH_4;
668  break;
669  case 8:
670  burst_length = BURST_LENGTH_8;
671  break;
672  default:
673  burst_disable = BURST_DISABLE;
674  break;
675  }
676  hostdata->dcntl_extra |= COMPAT_700_MODE;
677 
678  NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
679  NCR_700_writeb(burst_length | hostdata->dmode_extra,
680  host, DMODE_710_REG);
681  NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
682  (hostdata->differential ? DIFF : 0),
683  host, CTEST7_REG);
684  NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
685  NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
686  | AUTO_ATN, host, SCNTL0_REG);
687  } else {
688  NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
689  host, DMODE_700_REG);
690  NCR_700_writeb(hostdata->differential ?
691  DIFF : 0, host, CTEST7_REG);
692  if(hostdata->fast) {
693  /* this is for 700-66, does nothing on 700 */
694  NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
695  | GENERATE_RECEIVE_PARITY, host,
696  CTEST8_REG);
697  } else {
698  NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
699  | PARITY | AUTO_ATN, host, SCNTL0_REG);
700  }
701  }
702 
703  NCR_700_writeb(1 << host->this_id, host, SCID_REG);
704  NCR_700_writeb(0, host, SBCL_REG);
705  NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
706 
707  NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT|UX_DISC_INT
708  | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
709 
710  NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
711  NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
712  if(hostdata->clock > 75) {
713  printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
714  /* do the best we can, but the async clock will be out
715  * of spec: sync divider 2, async divider 3 */
716  DEBUG(("53c700: sync 2 async 3\n"));
717  NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
718  NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
719  hostdata->sync_clock = hostdata->clock/2;
720  } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
721  /* sync divider 1.5, async divider 3 */
722  DEBUG(("53c700: sync 1.5 async 3\n"));
723  NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
724  NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
725  hostdata->sync_clock = hostdata->clock*2;
726  hostdata->sync_clock /= 3;
727 
728  } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
729  /* sync divider 1, async divider 2 */
730  DEBUG(("53c700: sync 1 async 2\n"));
731  NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
732  NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
733  hostdata->sync_clock = hostdata->clock;
734  } else if(hostdata->clock > 25 && hostdata->clock <=37) {
735  /* sync divider 1, async divider 1.5 */
736  DEBUG(("53c700: sync 1 async 1.5\n"));
737  NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
738  NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
739  hostdata->sync_clock = hostdata->clock;
740  } else {
741  DEBUG(("53c700: sync 1 async 1\n"));
742  NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
743  NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
744  /* sync divider 1, async divider 1 */
745  hostdata->sync_clock = hostdata->clock;
746  }
747  /* Calculate the actual minimum period that can be supported
748  * by our synchronous clock speed. See the 710 manual for
749  * exact details of this calculation which is based on a
750  * setting of the SXFER register */
751  min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
752  hostdata->min_period = NCR_700_MIN_PERIOD;
753  if(min_period > NCR_700_MIN_PERIOD)
754  hostdata->min_period = min_period;
755 }
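/* Editorial worked example (assumed figures, not from the original source):
 * with hostdata->sync_clock = 50 (MHz) and a min_xferp of 1,
 *	min_period = 1000*(4+1)/(4*50) = 25
 * i.e. a 100ns true transfer period; the driver then uses the larger of this
 * and NCR_700_MIN_PERIOD as hostdata->min_period. */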
756 
757 STATIC void
758 NCR_700_chip_reset(struct Scsi_Host *host)
759 {
760  struct NCR_700_Host_Parameters *hostdata =
761  (struct NCR_700_Host_Parameters *)host->hostdata[0];
762  if(hostdata->chip710) {
763  NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
764  udelay(100);
765 
766  NCR_700_writeb(0, host, ISTAT_REG);
767  } else {
768  NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
769  udelay(100);
770 
771  NCR_700_writeb(0, host, DCNTL_REG);
772  }
773 
774  mdelay(1000);
775 
776  NCR_700_chip_setup(host);
777 }
778 
779 /* The heart of the message processing engine is that the instruction
780  * immediately after the INT is the normal case (and so must be CLEAR
781  * ACK). If we want to do something else, we call that routine in
782  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
783  * ACK) so that the routine returns correctly to resume its activity
784  * */
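/* Editorial note (not part of the original source): SCRIPTS instructions on
 * these chips are two 32-bit words (8 bytes), so dsp + 8 is the address just
 * past the CLEAR ACK that follows the INT.  Resuming at dsp executes the
 * CLEAR ACK (the normal case), while writing dsp + 8 into TEMP_REG makes a
 * scripted RETURN from the called routine skip it, as described above. */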
785 STATIC __u32
786 process_extended_message(struct Scsi_Host *host,
787  struct NCR_700_Host_Parameters *hostdata,
788  struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
789 {
790  __u32 resume_offset = dsp, temp = dsp + 8;
791  __u8 pun = 0xff, lun = 0xff;
792 
793  if(SCp != NULL) {
794  pun = SCp->device->id;
795  lun = SCp->device->lun;
796  }
797 
798  switch(hostdata->msgin[2]) {
799  case A_SDTR_MSG:
800  if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
801  struct scsi_target *starget = SCp->device->sdev_target;
802  __u8 period = hostdata->msgin[3];
803  __u8 offset = hostdata->msgin[4];
804 
805  if(offset == 0 || period == 0) {
806  offset = 0;
807  period = 0;
808  }
809 
810  spi_offset(starget) = offset;
811  spi_period(starget) = period;
812 
813  if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
814  spi_display_xfer_agreement(SCp->device->sdev_target);
815  NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
816  }
817 
818  NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
819  NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
820 
821  NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
822  host, SXFER_REG);
823 
824  } else {
825  /* SDTR message out of the blue, reject it */
826  shost_printk(KERN_WARNING, host,
827  "Unexpected SDTR msg\n");
828  hostdata->msgout[0] = A_REJECT_MSG;
829  dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
830  script_patch_16(hostdata->dev, hostdata->script,
831  MessageCount, 1);
832  /* SendMsgOut returns, so set up the return
833  * address */
834  resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
835  }
836  break;
837 
838  case A_WDTR_MSG:
839  printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
840  host->host_no, pun, lun);
841  hostdata->msgout[0] = A_REJECT_MSG;
842  dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
843  script_patch_16(hostdata->dev, hostdata->script, MessageCount,
844  1);
845  resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
846 
847  break;
848 
849  default:
850  printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
851  host->host_no, pun, lun,
852  NCR_700_phase[(dsps & 0xf00) >> 8]);
853  spi_print_msg(hostdata->msgin);
854  printk("\n");
855  /* just reject it */
856  hostdata->msgout[0] = A_REJECT_MSG;
857  dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
858  script_patch_16(hostdata->dev, hostdata->script, MessageCount,
859  1);
860  /* SendMsgOut returns, so set up the return
861  * address */
862  resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
863  }
864  NCR_700_writel(temp, host, TEMP_REG);
865  return resume_offset;
866 }
867 
868 STATIC __u32
869 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
870  struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
871 {
872  /* work out where to return to */
873  __u32 temp = dsp + 8, resume_offset = dsp;
874  __u8 pun = 0xff, lun = 0xff;
875 
876  if(SCp != NULL) {
877  pun = SCp->device->id;
878  lun = SCp->device->lun;
879  }
880 
881 #ifdef NCR_700_DEBUG
882  printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
883  NCR_700_phase[(dsps & 0xf00) >> 8]);
884  spi_print_msg(hostdata->msgin);
885  printk("\n");
886 #endif
887 
888  switch(hostdata->msgin[0]) {
889 
890  case A_EXTENDED_MSG:
891  resume_offset = process_extended_message(host, hostdata, SCp,
892  dsp, dsps);
893  break;
894 
895  case A_REJECT_MSG:
896  if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
897  /* Rejected our sync negotiation attempt */
898  spi_period(SCp->device->sdev_target) =
899  spi_offset(SCp->device->sdev_target) = 0;
900  NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
901  NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
902  } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
903  /* rejected our first simple tag message */
904  scmd_printk(KERN_WARNING, SCp,
905  "Rejected first tag queue attempt, turning off tag queueing\n");
906  /* we're done negotiating */
907  NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
908  hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
909  SCp->device->tagged_supported = 0;
910  scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
911  } else {
912  shost_printk(KERN_WARNING, host,
913  "(%d:%d) Unexpected REJECT Message %s\n",
914  pun, lun,
915  NCR_700_phase[(dsps & 0xf00) >> 8]);
916  /* however, just ignore it */
917  }
918  break;
919 
920  case A_PARITY_ERROR_MSG:
921  printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
922  pun, lun);
923  NCR_700_internal_bus_reset(host);
924  break;
925  case A_SIMPLE_TAG_MSG:
926  printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
927  pun, lun, hostdata->msgin[1],
928  NCR_700_phase[(dsps & 0xf00) >> 8]);
929  /* just ignore it */
930  break;
931  default:
932  printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
933  host->host_no, pun, lun,
934  NCR_700_phase[(dsps & 0xf00) >> 8]);
935 
936  spi_print_msg(hostdata->msgin);
937  printk("\n");
938  /* just reject it */
939  hostdata->msgout[0] = A_REJECT_MSG;
940  dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
941  script_patch_16(hostdata->dev, hostdata->script, MessageCount,
942  1);
943  /* SendMsgOut returns, so set up the return
944  * address */
945  resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
946 
947  break;
948  }
949  NCR_700_writel(temp, host, TEMP_REG);
950  /* set us up to receive another message */
951  dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
952  return resume_offset;
953 }
954 
955 STATIC __u32
956 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
957  struct Scsi_Host *host,
958  struct NCR_700_Host_Parameters *hostdata)
959 {
960  __u32 resume_offset = 0;
961  __u8 pun = 0xff, lun=0xff;
962 
963  if(SCp != NULL) {
964  pun = SCp->device->id;
965  lun = SCp->device->lun;
966  }
967 
968  if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
969  DEBUG((" COMMAND COMPLETE, status=%02x\n",
970  hostdata->status[0]));
971  /* OK, if TCQ still under negotiation, we now know it works */
972  if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
973  NCR_700_set_tag_neg_state(SCp->device,
974  NCR_700_FINISHED_TAG_NEGOTIATION);
975 
976  /* check for contingent allegiance conditions */
977  if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
978  status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
979  struct NCR_700_command_slot *slot =
980  (struct NCR_700_command_slot *)SCp->host_scribble;
981  if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
982  /* OOPS: bad device, returning another
983  * contingent allegiance condition */
984  scmd_printk(KERN_ERR, SCp,
985  "broken device is looping in contingent allegiance: ignoring\n");
986  NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
987  } else {
988  char *cmnd =
989  NCR_700_get_sense_cmnd(SCp->device);
990 #ifdef NCR_DEBUG
991  scsi_print_command(SCp);
992  printk(" cmd %p has status %d, requesting sense\n",
993  SCp, hostdata->status[0]);
994 #endif
995  /* we can destroy the command here
996  * because the contingent allegiance
997  * condition will cause a retry which
998  * will re-copy the command from the
999  * saved data_cmnd. We also unmap any
1000  * data associated with the command
1001  * here */
1002  NCR_700_unmap(hostdata, SCp, slot);
1003  dma_unmap_single(hostdata->dev, slot->pCmd,
1004  MAX_COMMAND_SIZE,
1005  DMA_TO_DEVICE);
1006 
1007  cmnd[0] = REQUEST_SENSE;
1008  cmnd[1] = (SCp->device->lun & 0x7) << 5;
1009  cmnd[2] = 0;
1010  cmnd[3] = 0;
1011  cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1012  cmnd[5] = 0;
1013  /* Here's a quiet hack: the
1014  * REQUEST_SENSE command is six bytes,
1015  * so store a flag indicating that
1016  * this was an internal sense request
1017  * and the original status at the end
1018  * of the command */
1019  cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1020  cmnd[7] = hostdata->status[0];
1021  cmnd[8] = SCp->cmd_len;
1022  SCp->cmd_len = 6; /* command length for
1023  * REQUEST_SENSE */
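/* Editorial illustration (not part of the original source): after the code
 * above, the command buffer holds
 *	cmnd[0..5] = the six byte REQUEST SENSE CDB
 *	cmnd[6]    = NCR_700_INTERNAL_SENSE_MAGIC
 *	cmnd[7]    = the original SCSI status byte
 *	cmnd[8]    = the original cmd_len
 * which NCR_700_scsi_done() later uses to restore the command and its
 * result. */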
1024  slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1025  slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1026  slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1027  slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1028  slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1029  slot->SG[1].pAddr = 0;
1030  slot->resume_offset = hostdata->pScript;
1031  dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1032  dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1033 
1034  /* queue the command for reissue */
1035  slot->state = NCR_700_SLOT_QUEUED;
1036  slot->flags = NCR_700_FLAG_AUTOSENSE;
1037  hostdata->state = NCR_700_HOST_FREE;
1038  hostdata->cmd = NULL;
1039  }
1040  } else {
1041  // Currently rely on the mid layer evaluation
1042  // of the tag queuing capability
1043  //
1044  //if(status_byte(hostdata->status[0]) == GOOD &&
1045  // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1046  // /* Piggy back the tag queueing support
1047  // * on this command */
1048  // dma_sync_single_for_cpu(hostdata->dev,
1049  // slot->dma_handle,
1050  // SCp->request_bufflen,
1051  // DMA_FROM_DEVICE);
1052  // if(((char *)SCp->request_buffer)[7] & 0x02) {
1053  // scmd_printk(KERN_INFO, SCp,
1054  // "Enabling Tag Command Queuing\n");
1055  // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1056  // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1057  // } else {
1058  // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1059  // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1060  // }
1061  //}
1062  NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1063  }
1064  } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1065  __u8 i = (dsps & 0xf00) >> 8;
1066 
1067  scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1068  NCR_700_phase[i],
1069  sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1070  scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1071  SCp->cmd_len);
1072  scsi_print_command(SCp);
1073 
1074  NCR_700_internal_bus_reset(host);
1075  } else if((dsps & 0xfffff000) == A_FATAL) {
1076  int i = (dsps & 0xfff);
1077 
1078  printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1079  host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1080  if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1081  printk(KERN_ERR " msg begins %02x %02x\n",
1082  hostdata->msgin[0], hostdata->msgin[1]);
1083  }
1084  NCR_700_internal_bus_reset(host);
1085  } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1086 #ifdef NCR_700_DEBUG
1087  __u8 i = (dsps & 0xf00) >> 8;
1088 
1089  printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1090  host->host_no, pun, lun,
1091  i, NCR_700_phase[i]);
1092 #endif
1093  save_for_reselection(hostdata, SCp, dsp);
1094 
1095  } else if(dsps == A_RESELECTION_IDENTIFIED) {
1096  __u8 lun;
1097  struct NCR_700_command_slot *slot;
1098  __u8 reselection_id = hostdata->reselection_id;
1099  struct scsi_device *SDp;
1100 
1101  lun = hostdata->msgin[0] & 0x1f;
1102 
1103  hostdata->reselection_id = 0xff;
1104  DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1105  host->host_no, reselection_id, lun));
1106  /* clear the reselection indicator */
1107  SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1108  if(unlikely(SDp == NULL)) {
1109  printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1110  host->host_no, reselection_id, lun);
1111  BUG();
1112  }
1113  if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1114  struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1115  if(unlikely(SCp == NULL)) {
1116  printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1117  host->host_no, reselection_id, lun, hostdata->msgin[2]);
1118  BUG();
1119  }
1120 
1121  slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1122  DDEBUG(KERN_DEBUG, SDp,
1123  "reselection is tag %d, slot %p(%d)\n",
1124  hostdata->msgin[2], slot, slot->tag);
1125  } else {
1126  struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1127  if(unlikely(SCp == NULL)) {
1128  sdev_printk(KERN_ERR, SDp,
1129  "no saved request for untagged cmd\n");
1130  BUG();
1131  }
1132  slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1133  }
1134 
1135  if(slot == NULL) {
1136  printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1137  host->host_no, reselection_id, lun,
1138  hostdata->msgin[0], hostdata->msgin[1],
1139  hostdata->msgin[2]);
1140  } else {
1141  if(hostdata->state != NCR_700_HOST_BUSY)
1142  printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1143  host->host_no);
1144  resume_offset = slot->resume_offset;
1145  hostdata->cmd = slot->cmnd;
1146 
1147  /* re-patch for this command */
1148  script_patch_32_abs(hostdata->dev, hostdata->script,
1149  CommandAddress, slot->pCmd);
1150  script_patch_16(hostdata->dev, hostdata->script,
1151  CommandCount, slot->cmnd->cmd_len);
1152  script_patch_32_abs(hostdata->dev, hostdata->script,
1153  SGScriptStartAddress,
1154  to32bit(&slot->pSG[0].ins));
1155 
1156  /* Note: setting SXFER only works if we're
1157  * still in the MESSAGE phase, so it is vital
1158  * that ACK is still asserted when we process
1159  * the reselection message. The resume offset
1160  * should therefore always clear ACK */
1161  NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1162  host, SXFER_REG);
1163  dma_cache_sync(hostdata->dev, hostdata->msgin,
1164  MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1165  dma_cache_sync(hostdata->dev, hostdata->msgout,
1166  MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1167  /* I'm just being paranoid here, the command should
1168  * already have been flushed from the cache */
1169  dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1170  slot->cmnd->cmd_len, DMA_TO_DEVICE);
1171 
1172 
1173 
1174  }
1175  } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1176 
1177  /* This section is full of debugging code because I've
1178  * never managed to reach it. I think what happens is
1179  * that, because the 700 runs with selection
1180  * interrupts enabled the whole time, we take a
1181  * selection interrupt before we manage to get to the
1182  * reselected script interrupt */
1183 
1184  __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1185  struct NCR_700_command_slot *slot;
1186 
1187  /* Take out our own ID */
1188  reselection_id &= ~(1<<host->this_id);
1189 
1190  /* I've never seen this happen, so keep this as a printk rather
1191  * than a debug */
1192  printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1193  host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1194 
1195  {
1196  /* FIXME: DEBUGGING CODE */
1197  __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1198  int i;
1199 
1200  for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1201  if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1202  && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1203  break;
1204  }
1205  printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1206  SCp = hostdata->slots[i].cmnd;
1207  }
1208 
1209  if(SCp != NULL) {
1210  slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1211  /* change slot from busy to queued to redo command */
1212  slot->state = NCR_700_SLOT_QUEUED;
1213  }
1214  hostdata->cmd = NULL;
1215 
1216  if(reselection_id == 0) {
1217  if(hostdata->reselection_id == 0xff) {
1218  printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1219  return 0;
1220  } else {
1221  printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1222  host->host_no);
1223  reselection_id = hostdata->reselection_id;
1224  }
1225  } else {
1226 
1227  /* convert to real ID */
1228  reselection_id = bitmap_to_number(reselection_id);
1229  }
1230  hostdata->reselection_id = reselection_id;
1231  /* just in case we have a stale simple tag message, clear it */
1232  hostdata->msgin[1] = 0;
1233  dma_cache_sync(hostdata->dev, hostdata->msgin,
1234  MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1235  if(hostdata->tag_negotiated & (1<<reselection_id)) {
1236  resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1237  } else {
1238  resume_offset = hostdata->pScript + Ent_GetReselectionData;
1239  }
1240  } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1241  /* we've just disconnected from the bus, do nothing since
1242  * a return here will re-run the queued command slot
1243  * that may have been interrupted by the initial selection */
1244  DEBUG((" SELECTION COMPLETED\n"));
1245  } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1246  resume_offset = process_message(host, hostdata, SCp,
1247  dsp, dsps);
1248  } else if((dsps & 0xfffff000) == 0) {
1249  __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1250  printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1251  host->host_no, pun, lun, NCR_700_condition[i],
1252  NCR_700_phase[j], dsp - hostdata->pScript);
1253  if(SCp != NULL) {
1254  struct scatterlist *sg;
1255 
1256  scsi_print_command(SCp);
1257  scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1258  printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1259  }
1260  }
1261  NCR_700_internal_bus_reset(host);
1262  } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1263  printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1264  host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1265  resume_offset = dsp;
1266  } else {
1267  printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1268  host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1269  NCR_700_internal_bus_reset(host);
1270  }
1271  return resume_offset;
1272 }
1273 
1274 /* We run the 53c700 with selection interrupts always enabled. This
1275  * means that the chip may be selected as soon as the bus frees. On a
1276  * busy bus, this can be before the scripts engine finishes its
1277  * processing. Therefore, part of the selection processing has to be
1278  * to find out what the scripts engine is doing and complete the
1279  * function if necessary (i.e. process the pending disconnect or save
1280  * the interrupted initial selection */
1281 STATIC inline __u32
1282 process_selection(struct Scsi_Host *host, __u32 dsp)
1283 {
1284  __u8 id = 0; /* Squash compiler warning */
1285  int count = 0;
1286  __u32 resume_offset = 0;
1287  struct NCR_700_Host_Parameters *hostdata =
1288  (struct NCR_700_Host_Parameters *)host->hostdata[0];
1289  struct scsi_cmnd *SCp = hostdata->cmd;
1290  __u8 sbcl;
1291 
1292  for(count = 0; count < 5; count++) {
1293  id = NCR_700_readb(host, hostdata->chip710 ?
1294  CTEST9_REG : SFBR_REG);
1295 
1296  /* Take out our own ID */
1297  id &= ~(1<<host->this_id);
1298  if(id != 0)
1299  break;
1300  udelay(5);
1301  }
1302  sbcl = NCR_700_readb(host, SBCL_REG);
1303  if((sbcl & SBCL_IO) == 0) {
1304  /* mark as having been selected rather than reselected */
1305  id = 0xff;
1306  } else {
1307  /* convert to real ID */
1308  hostdata->reselection_id = id = bitmap_to_number(id);
1309  DEBUG(("scsi%d: Reselected by %d\n",
1310  host->host_no, id));
1311  }
1312  if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1313  struct NCR_700_command_slot *slot =
1314  (struct NCR_700_command_slot *)SCp->host_scribble;
1315  DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1316 
1317  switch(dsp - hostdata->pScript) {
1318  case Ent_Disconnect1:
1319  case Ent_Disconnect2:
1320  save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1321  break;
1322  case Ent_Disconnect3:
1323  case Ent_Disconnect4:
1324  save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1325  break;
1326  case Ent_Disconnect5:
1327  case Ent_Disconnect6:
1328  save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1329  break;
1330  case Ent_Disconnect7:
1331  case Ent_Disconnect8:
1332  save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1333  break;
1334  case Ent_Finish1:
1335  case Ent_Finish2:
1336  process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1337  break;
1338 
1339  default:
1340  slot->state = NCR_700_SLOT_QUEUED;
1341  break;
1342  }
1343  }
1344  hostdata->state = NCR_700_HOST_BUSY;
1345  hostdata->cmd = NULL;
1346  /* clear any stale simple tag message */
1347  hostdata->msgin[1] = 0;
1348  dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1349  DMA_FROM_DEVICE);
1350 
1351  if(id == 0xff) {
1352  /* Selected as target, Ignore */
1353  resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1354  } else if(hostdata->tag_negotiated & (1<<id)) {
1355  resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1356  } else {
1357  resume_offset = hostdata->pScript + Ent_GetReselectionData;
1358  }
1359  return resume_offset;
1360 }
1361 
1362 static inline void
1363 NCR_700_clear_fifo(struct Scsi_Host *host) {
1364  const struct NCR_700_Host_Parameters *hostdata
1365  = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1366  if(hostdata->chip710) {
1367  NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1368  } else {
1369  NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1370  }
1371 }
1372 
1373 static inline void
1374 NCR_700_flush_fifo(struct Scsi_Host *host) {
1375  const struct NCR_700_Host_Parameters *hostdata
1376  = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1377  if(hostdata->chip710) {
1378  NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1379  udelay(10);
1380  NCR_700_writeb(0, host, CTEST8_REG);
1381  } else {
1382  NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1383  udelay(10);
1384  NCR_700_writeb(0, host, DFIFO_REG);
1385  }
1386 }
1387 
1388 
1389 /* The queue lock with interrupts disabled must be held on entry to
1390  * this function */
1391 STATIC int
1392 NCR_700_start_command(struct scsi_cmnd *SCp)
1393 {
1394  struct NCR_700_command_slot *slot =
1395  (struct NCR_700_command_slot *)SCp->host_scribble;
1396  struct NCR_700_Host_Parameters *hostdata =
1397  (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1398  __u16 count = 1; /* for IDENTIFY message */
1399 
1400  if(hostdata->state != NCR_700_HOST_FREE) {
1401  /* keep this inside the lock to close the race window where
1402  * the running command finishes on another CPU while we don't
1403  * change the state to queued on this one */
1404  slot->state = NCR_700_SLOT_QUEUED;
1405 
1406  DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1407  SCp->device->host->host_no, slot->cmnd, slot));
1408  return 0;
1409  }
1410  hostdata->state = NCR_700_HOST_BUSY;
1411  hostdata->cmd = SCp;
1412  slot->state = NCR_700_SLOT_BUSY;
1413  /* keep interrupts disabled until we have the command correctly
1414  * set up so we cannot take a selection interrupt */
1415 
1416  hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1417  slot->flags != NCR_700_FLAG_AUTOSENSE),
1418  SCp->device->lun);
1419  /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1420  * if the negotiated transfer parameters still hold, so
1421  * always renegotiate them */
1422  if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1423  slot->flags == NCR_700_FLAG_AUTOSENSE) {
1424  NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1425  }
1426 
1427  /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1428  * If a contingent allegiance condition exists, the device
1429  * will refuse all tags, so send the request sense as untagged
1430  * */
1431  if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1432  && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1433  slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1434  count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1435  }
1436 
1437  if(hostdata->fast &&
1438  NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1439  count += spi_populate_sync_msg(&hostdata->msgout[count],
1440  spi_period(SCp->device->sdev_target),
1441  spi_offset(SCp->device->sdev_target));
1442  NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1443  }
1444 
1445  script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1446 
1447 
1448  script_patch_ID(hostdata->dev, hostdata->script,
1449  Device_ID, 1<<scmd_id(SCp));
1450 
1451  script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1452  slot->pCmd);
1453  script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1454  SCp->cmd_len);
1455  /* finally plumb the beginning of the SG list into the script
1456  * */
1457  script_patch_32_abs(hostdata->dev, hostdata->script,
1458  SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1459  NCR_700_clear_fifo(SCp->device->host);
1460 
1461  if(slot->resume_offset == 0)
1462  slot->resume_offset = hostdata->pScript;
1463  /* now perform all the writebacks and invalidates */
1464  dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1465  dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1466  DMA_FROM_DEVICE);
1467  dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1468  dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1469 
1470  /* set the synchronous period/offset */
1471  NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1472  SCp->device->host, SXFER_REG);
1473  NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1474  NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1475 
1476  return 1;
1477 }
1478 
1479 STATIC irqreturn_t
1480 NCR_700_intr(int irq, void *dev_id)
1481 {
1482  struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1483  struct NCR_700_Host_Parameters *hostdata =
1484  (struct NCR_700_Host_Parameters *)host->hostdata[0];
1485  __u8 istat;
1486  __u32 resume_offset = 0;
1487  __u8 pun = 0xff, lun = 0xff;
1488  unsigned long flags;
1489  int handled = 0;
1490 
1491  /* Use the host lock to serialise access to the 53c700
1492  * hardware. Note: In future, we may need to take the queue
1493  * lock to enter the done routines. When that happens, we
1494  * need to ensure that for this driver, the host lock and the
1495  * queue lock point to the same thing. */
1496  spin_lock_irqsave(host->host_lock, flags);
1497  if((istat = NCR_700_readb(host, ISTAT_REG))
1498  & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1499  __u32 dsps;
1500  __u8 sstat0 = 0, dstat = 0;
1501  __u32 dsp;
1502  struct scsi_cmnd *SCp = hostdata->cmd;
1503  enum NCR_700_Host_State state;
1504 
1505  handled = 1;
1506  state = hostdata->state;
1507  SCp = hostdata->cmd;
1508 
1509  if(istat & SCSI_INT_PENDING) {
1510  udelay(10);
1511 
1512  sstat0 = NCR_700_readb(host, SSTAT0_REG);
1513  }
1514 
1515  if(istat & DMA_INT_PENDING) {
1516  udelay(10);
1517 
1518  dstat = NCR_700_readb(host, DSTAT_REG);
1519  }
1520 
1521  dsps = NCR_700_readl(host, DSPS_REG);
1522  dsp = NCR_700_readl(host, DSP_REG);
1523 
1524  DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1525  host->host_no, istat, sstat0, dstat,
1526  (dsp - (__u32)(hostdata->pScript))/4,
1527  dsp, dsps));
1528 
1529  if(SCp != NULL) {
1530  pun = SCp->device->id;
1531  lun = SCp->device->lun;
1532  }
1533 
1534  if(sstat0 & SCSI_RESET_DETECTED) {
1535  struct scsi_device *SDp;
1536  int i;
1537 
1538  hostdata->state = NCR_700_HOST_BUSY;
1539 
1540  printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1541  host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1542 
1543  scsi_report_bus_reset(host, 0);
1544 
1545  /* clear all the negotiated parameters */
1546  __shost_for_each_device(SDp, host)
1547  NCR_700_clear_flag(SDp, ~0);
1548 
1549  /* clear all the slots and their pending commands */
1550  for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1551  struct scsi_cmnd *SCp;
1552  struct NCR_700_command_slot *slot =
1553  &hostdata->slots[i];
1554 
1555  if(slot->state == NCR_700_SLOT_FREE)
1556  continue;
1557 
1558  SCp = slot->cmnd;
1559  printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1560  slot, SCp);
1561  free_slot(slot, hostdata);
1562  SCp->host_scribble = NULL;
1563  NCR_700_set_depth(SCp->device, 0);
1564  /* NOTE: deadlock potential here: we
1565  * rely on mid-layer guarantees that
1566  * scsi_done won't try to issue the
1567  * command again otherwise we'll
1568  * deadlock on the
1569  * hostdata->state_lock */
1570  SCp->result = DID_RESET << 16;
1571  SCp->scsi_done(SCp);
1572  }
1573  mdelay(25);
1574  NCR_700_chip_setup(host);
1575 
1576  hostdata->state = NCR_700_HOST_FREE;
1577  hostdata->cmd = NULL;
1578  /* signal back if this was an eh induced reset */
1579  if(hostdata->eh_complete != NULL)
1580  complete(hostdata->eh_complete);
1581  goto out_unlock;
1582  } else if(sstat0 & SELECTION_TIMEOUT) {
1583  DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1584  host->host_no, pun, lun));
1585  NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1586  } else if(sstat0 & PHASE_MISMATCH) {
1587  struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1588  (struct NCR_700_command_slot *)SCp->host_scribble;
1589 
1590  if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1591  /* It wants to reply to some part of
1592  * our message */
1593 #ifdef NCR_700_DEBUG
1594  __u32 temp = NCR_700_readl(host, TEMP_REG);
1595  int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1596  printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1597 #endif
1598  resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1599  } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1600  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1601  int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1602  int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1603  int residual = NCR_700_data_residual(host);
1604  int i;
1605 #ifdef NCR_700_DEBUG
1606  __u32 naddr = NCR_700_readl(host, DNAD_REG);
1607 
1608  printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1609  host->host_no, pun, lun,
1610  SGcount, data_transfer);
1611  scsi_print_command(SCp);
1612  if(residual) {
1613  printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1614  host->host_no, pun, lun,
1615  SGcount, data_transfer, residual);
1616  }
1617 #endif
1618  data_transfer += residual;
1619 
1620  if(data_transfer != 0) {
1621  int count;
1622  __u32 pAddr;
1623 
1624  SGcount--;
1625 
1626  count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1627  DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1628  slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1629  slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1630  pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1631  pAddr += (count - data_transfer);
1632 #ifdef NCR_700_DEBUG
1633  if(pAddr != naddr) {
1634  printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1635  }
1636 #endif
1637  slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1638  }
1639  /* set the executed moves to nops */
1640  for(i=0; i<SGcount; i++) {
1641  slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1642  slot->SG[i].pAddr = 0;
1643  }
1644  dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1645  /* and pretend we disconnected after
1646  * the command phase */
1647  resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1648  /* make sure all the data is flushed */
1649  NCR_700_flush_fifo(host);
1650  } else {
1651  __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1652  printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1653  host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1655  }
1656 
1657  } else if(sstat0 & SCSI_GROSS_ERROR) {
1658  printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1659  host->host_no, pun, lun);
1660  NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1661  } else if(sstat0 & PARITY_ERROR) {
1662  printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1663  host->host_no, pun, lun);
1664  NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1665  } else if(dstat & SCRIPT_INT_RECEIVED) {
1666  DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1667  host->host_no, pun, lun));
1668  resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1669  } else if(dstat & (ILGL_INST_DETECTED)) {
1670  printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1671  " Please email [email protected] with the details\n",
1672  host->host_no, pun, lun,
1673  dsp, dsp - hostdata->pScript);
1674  NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1675  } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1676  printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1677  host->host_no, pun, lun, dstat);
1678  NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1679  }
1680 
1681 
1682  /* NOTE: selection interrupt processing MUST occur
1683  * after script interrupt processing to correctly cope
1684  * with the case where we process a disconnect and
1685  * then get reselected before we process the
1686  * disconnection */
1687  if(sstat0 & SELECTED) {
1688  /* FIXME: It currently takes at least FOUR
1689  * interrupts to complete a command that
1690  * disconnects: one for the disconnect, one
1691  * for the reselection, one to get the
1692  * reselection data and one to complete the
1693  * command. If we guess the reselected
1694  * command here and prepare it, we only need
1695  * to get a reselection data interrupt if we
1696  * guessed wrongly. Since the interrupt
1697  * overhead is much greater than the command
1698  * setup, this would be an efficient
1699  * optimisation particularly as we probably
1700  * only have one outstanding command on a
1701  * target most of the time */
1702 
1703  resume_offset = process_selection(host, dsp);
1704 
1705  }
1706 
1707  }
1708 
1709  if(resume_offset) {
1710  if(hostdata->state != NCR_700_HOST_BUSY) {
1711  printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1712  host->host_no, resume_offset, resume_offset - hostdata->pScript);
1713  hostdata->state = NCR_700_HOST_BUSY;
1714  }
1715 
1716  DEBUG(("Attempting to resume at %x\n", resume_offset));
1717  NCR_700_clear_fifo(host);
1718  NCR_700_writel(resume_offset, host, DSP_REG);
1719  }
1720  /* There is probably a technical no-no about this: If we're a
1721  * shared interrupt and we got this interrupt because the
1722  * other device needs servicing not us, we're still going to
1723  * check our queued commands here---of course, there shouldn't
1724  * be any outstanding.... */
1725  if(hostdata->state == NCR_700_HOST_FREE) {
1726  int i;
1727 
1728  for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1729  /* fairness: always run the queue from the last
1730  * position we left off */
1731  int j = (i + hostdata->saved_slot_position)
1732  % NCR_700_COMMAND_SLOTS_PER_HOST;
1733 
1734  if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1735  continue;
1736  if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1737  DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1738  host->host_no, &hostdata->slots[j],
1739  hostdata->slots[j].cmnd));
1740  hostdata->saved_slot_position = j + 1;
1741  }
1742 
1743  break;
1744  }
1745  }
1746  out_unlock:
1747  spin_unlock_irqrestore(host->host_lock, flags);
1748  return IRQ_RETVAL(handled);
1749 }
1750 
1751 static int
1752 NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1753 {
1754  struct NCR_700_Host_Parameters *hostdata =
1755  (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1756  __u32 move_ins;
1758  struct NCR_700_command_slot *slot;
1759 
1760  if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1761  /* We're over our allocation, this should never happen
1762  * since we report the max allocation to the mid layer */
1763  printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1764  return 1;
1765  }
1766  /* check for untagged commands. We cannot have any outstanding
1767  * commands if we accept them. Commands could be untagged because:
1768  *
1769  * - The tag negotiated bitmap is clear
1770  * - The blk layer sent an untagged command
1771  */
1772  if(NCR_700_get_depth(SCp->device) != 0
1773  && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1774  || !blk_rq_tagged(SCp->request))) {
1775  CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1776  NCR_700_get_depth(SCp->device));
1777  return SCSI_MLQUEUE_DEVICE_BUSY;
1778  }
1779  if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1780  CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1781  NCR_700_get_depth(SCp->device));
1782  return SCSI_MLQUEUE_DEVICE_BUSY;
1783  }
1784  NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1785 
1786  /* begin the command here */
1787  /* no need to check for NULL, test for command_slot_count above
1788  * ensures a slot is free */
1789  slot = find_empty_slot(hostdata);
1790 
1791  slot->cmnd = SCp;
1792 
1793  SCp->scsi_done = done;
1794  SCp->host_scribble = (unsigned char *)slot;
1795  SCp->SCp.ptr = NULL;
1796  SCp->SCp.buffer = NULL;
1797 
1798 #ifdef NCR_700_DEBUG
1799  printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1800  scsi_print_command(SCp);
1801 #endif
1802  if(blk_rq_tagged(SCp->request)
1803  && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1804  && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1805  scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1806  hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1807  NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1808  }
1809 
1810  /* here we may have to process an untagged command. The gate
1811  * above ensures that this will be the only one outstanding,
1812  * so clear the tag negotiated bit.
1813  *
1814  * FIXME: This will royally screw up on multiple LUN devices
1815  * */
1816  if(!blk_rq_tagged(SCp->request)
1817  && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1818  scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1819  hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1820  }
1821 
1822  if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1823  && scsi_get_tag_type(SCp->device)) {
1824  slot->tag = SCp->request->tag;
1825  CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1826  slot->tag, slot);
1827  } else {
1828  slot->tag = SCSI_NO_TAG;
1829  /* must populate current_cmnd for scsi_find_tag to work */
1830  SCp->device->current_cmnd = SCp;
1831  }
1832  /* sanity check: some of the commands generated by the mid-layer
1833  * have an eccentric idea of their sc_data_direction */
1834  if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835  SCp->sc_data_direction != DMA_NONE) {
1836 #ifdef NCR_700_DEBUG
1837  printk("53c700: Command");
1838  scsi_print_command(SCp);
1839  printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840 #endif
1841  SCp->sc_data_direction = DMA_NONE;
1842  }
1843 
1844  switch (SCp->cmnd[0]) {
1845  case REQUEST_SENSE:
1846  /* clear the internal sense magic */
1847  SCp->cmnd[6] = 0;
1848  /* fall through */
1849  default:
1850  /* OK, get it from the command */
1851  switch(SCp->sc_data_direction) {
1852  case DMA_BIDIRECTIONAL:
1853  default:
1854  printk(KERN_ERR "53c700: Unknown command for data direction ");
1855  scsi_print_command(SCp);
1856 
1857  move_ins = 0;
1858  break;
1859  case DMA_NONE:
1860  move_ins = 0;
1861  break;
1862  case DMA_FROM_DEVICE:
1863  move_ins = SCRIPT_MOVE_DATA_IN;
1864  break;
1865  case DMA_TO_DEVICE:
1866  move_ins = SCRIPT_MOVE_DATA_OUT;
1867  break;
1868  }
1869  }
1870 
1871  /* now build the scatter gather list */
1873  if(move_ins != 0) {
1874  int i;
1875  int sg_count;
1876  dma_addr_t vPtr = 0;
1877  struct scatterlist *sg;
1878  __u32 count = 0;
1879 
1880  sg_count = scsi_dma_map(SCp);
1881  BUG_ON(sg_count < 0);
1882 
1883  scsi_for_each_sg(SCp, sg, sg_count, i) {
1884  vPtr = sg_dma_address(sg);
1885  count = sg_dma_len(sg);
1886 
1887  slot->SG[i].ins = bS_to_host(move_ins | count);
1888  DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1889  i, count, slot->SG[i].ins, (unsigned long)vPtr));
1890  slot->SG[i].pAddr = bS_to_host(vPtr);
1891  }
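  /* Terminate the on-chip SG list: the entry after the last mapped
   * segment is a script RETURN, so the data-phase script hands control
   * back once every segment has been moved. */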
1892  slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1893  slot->SG[i].pAddr = 0;
1894  dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1895  DEBUG((" SETTING %08lx to %x\n",
1896  (&slot->pSG[i].ins),
1897  slot->SG[i].ins));
1898  }
1899  slot->resume_offset = 0;
1900  slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1901  MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1902  NCR_700_start_command(SCp);
1903  return 0;
1904 }
1905 
1906 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1907 
1908 STATIC int
1909 NCR_700_abort(struct scsi_cmnd * SCp)
1910 {
1911  struct NCR_700_command_slot *slot;
1912 
1913  scmd_printk(KERN_INFO, SCp,
1914  "New error handler wants to abort command\n\t");
1915  scsi_print_command(SCp);
1916 
1917  slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1918 
1919  if(slot == NULL)
1920  /* no outstanding command to abort */
1921  return SUCCESS;
1922  if(SCp->cmnd[0] == TEST_UNIT_READY) {
1923  /* FIXME: This is because of a problem in the new
1924  * error handler. When it is in error recovery, it
1925  * will send a TUR to a device it thinks may still be
1926  * showing a problem. If the TUR isn't responded to,
1927  * it will abort it and mark the device off line.
1928  * Unfortunately, it does no other error recovery, so
1929  * this would leave us with an outstanding command
1930  * occupying a slot. Rather than allow this to
1931  * happen, we issue a bus reset to force all
1932  * outstanding commands to terminate here. */
1933  NCR_700_internal_bus_reset(SCp->device->host);
1934  /* still drop through and return failed */
1935  }
1936  return FAILED;
1937 
1938 }
1939 
1940 STATIC int
1941 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1942 {
1943  DECLARE_COMPLETION_ONSTACK(complete);
1944  struct NCR_700_Host_Parameters *hostdata =
1945  (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1946 
1947  scmd_printk(KERN_INFO, SCp,
1948  "New error handler wants BUS reset, cmd %p\n\t", SCp);
1949  scsi_print_command(SCp);
1950 
1951  /* In theory, eh_complete should always be null because the
1952  * eh is single threaded, but just in case we're handling a
1953  * reset via sg or something */
1954  spin_lock_irq(SCp->device->host->host_lock);
1955  while (hostdata->eh_complete != NULL) {
1956  spin_unlock_irq(SCp->device->host->host_lock);
1957  msleep_interruptible(100);
1958  spin_lock_irq(SCp->device->host->host_lock);
1959  }
1960 
1961  hostdata->eh_complete = &complete;
1962  NCR_700_internal_bus_reset(SCp->device->host);
1963 
1964  spin_unlock_irq(SCp->device->host->host_lock);
1965  wait_for_completion(&complete);
1966  spin_lock_irq(SCp->device->host->host_lock);
1967 
1968  hostdata->eh_complete = NULL;
1969  /* Revalidate the transport parameters of the failing device */
1970  if(hostdata->fast)
1971  spi_schedule_dv_device(SCp->device);
1972 
1973  spin_unlock_irq(SCp->device->host->host_lock);
1974  return SUCCESS;
1975 }
1976 
1977 STATIC int
1978 NCR_700_host_reset(struct scsi_cmnd * SCp)
1979 {
1980  scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1981  scsi_print_command(SCp);
1982 
1983  spin_lock_irq(SCp->device->host->host_lock);
1984 
1985  NCR_700_internal_bus_reset(SCp->device->host);
1986  NCR_700_chip_reset(SCp->device->host);
1987 
1988  spin_unlock_irq(SCp->device->host->host_lock);
1989 
1990  return SUCCESS;
1991 }
1992 
1993 STATIC void
1994 NCR_700_set_period(struct scsi_target *STp, int period)
1995 {
1996  struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1997  struct NCR_700_Host_Parameters *hostdata =
1998  (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1999 
2000  if(!hostdata->fast)
2001  return;
2002 
2003  if(period < hostdata->min_period)
2004  period = hostdata->min_period;
2005 
2006  spi_period(STp) = period;
2007  spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2008  NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2009  spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2010 }
2011 
2012 STATIC void
2013 NCR_700_set_offset(struct scsi_target *STp, int offset)
2014 {
2015  struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2016  struct NCR_700_Host_Parameters *hostdata =
2017  (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2018  int max_offset = hostdata->chip710
2019  ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2020 
2021  if(!hostdata->fast)
2022  return;
2023 
2024  if(offset > max_offset)
2025  offset = max_offset;
2026 
2027  /* if we're currently async, make sure the period is reasonable */
2028  if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2029  spi_period(STp) > 0xff))
2030  spi_period(STp) = hostdata->min_period;
2031 
2032  spi_offset(STp) = offset;
2033  spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2034  NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2035  spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2036 }
2037 
2038 STATIC int
2039 NCR_700_slave_alloc(struct scsi_device *SDp)
2040 {
2041  SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2042  GFP_KERNEL);
2043 
2044  if (!SDp->hostdata)
2045  return -ENOMEM;
2046 
2047  return 0;
2048 }
2049 
2050 STATIC int
2051 NCR_700_slave_configure(struct scsi_device *SDp)
2052 {
2053  struct NCR_700_Host_Parameters *hostdata =
2054  (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2055 
2056  /* to do here: allocate memory; build a queue_full list */
2057  if(SDp->tagged_supported) {
2058  scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2059  scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2060  NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2061  } else {
2062  /* initialise to default depth */
2063  scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2064  }
2065  if(hostdata->fast) {
2066  /* Find the correct offset and period via domain validation */
2067  if (!spi_initial_dv(SDp->sdev_target))
2068  spi_dv_device(SDp);
2069  } else {
2070  spi_offset(SDp->sdev_target) = 0;
2071  spi_period(SDp->sdev_target) = 0;
2072  }
2073  return 0;
2074 }
2075 
2076 STATIC void
2077 NCR_700_slave_destroy(struct scsi_device *SDp)
2078 {
2079  kfree(SDp->hostdata);
2080  SDp->hostdata = NULL;
2081 }
2082 
2083 static int
2084 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
2085 {
2086  if (reason != SCSI_QDEPTH_DEFAULT)
2087  return -EOPNOTSUPP;
2088 
2089  if (depth > NCR_700_MAX_TAGS)
2090  depth = NCR_700_MAX_TAGS;
2091 
2092  scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2093  return depth;
2094 }
2095 
2096 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2097 {
2098  int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2099  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2100  struct NCR_700_Host_Parameters *hostdata =
2101  (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2102 
2103  scsi_set_tag_type(SDp, tag_type);
2104 
2105  /* We have a global (per target) flag to track whether TCQ is
2106  * enabled, so we'll be turning it off for the entire target here.
2107  * our tag algorithm will fail if we mix tagged and untagged commands,
2108  * so quiesce the device before doing this */
2109  if (change_tag)
2110  scsi_target_quiesce(SDp->sdev_target);
2111 
2112  if (!tag_type) {
2113  /* shift back to the default unqueued number of commands
2114  * (the user can still raise this) */
2115  scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2116  hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2117  } else {
2118  /* Here, we cleared the negotiation flag above, so this
2119  * will force the driver to renegotiate */
2120  scsi_activate_tcq(SDp, SDp->queue_depth);
2121  if (change_tag)
2122  NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2123  }
2124  if (change_tag)
2125  scsi_target_resume(SDp->sdev_target);
2126 
2127  return tag_type;
2128 }
2129 
2130 static ssize_t
2131 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2132 {
2133  struct scsi_device *SDp = to_scsi_device(dev);
2134 
2135  return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2136 }
2137 
2138 static struct device_attribute NCR_700_active_tags_attr = {
2139  .attr = {
2140  .name = "active_tags",
2141  .mode = S_IRUGO,
2142  },
2143  .show = NCR_700_show_active_tags,
2144 };
2145 
2146 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2147  &NCR_700_active_tags_attr,
2148  NULL,
2149 };
2150 
2154 
2155 static struct spi_function_template NCR_700_transport_functions = {
2156  .set_period = NCR_700_set_period,
2157  .show_period = 1,
2158  .set_offset = NCR_700_set_offset,
2159  .show_offset = 1,
2160 };
2161 
2162 static int __init NCR_700_init(void)
2163 {
2164  NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2165  if(!NCR_700_transport_template)
2166  return -ENODEV;
2167  return 0;
2168 }
2169 
2170 static void __exit NCR_700_exit(void)
2171 {
2172  spi_release_transport(NCR_700_transport_template);
2173 }
2174 
2175 module_init(NCR_700_init);
2176 module_exit(NCR_700_exit);
2177