Linux Kernel 3.7.1
mesh.c
1 /*
2  * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
3  * bus adaptor found on Power Macintosh computers.
4  * We assume the MESH is connected to a DBDMA (descriptor-based DMA)
5  * controller.
6  *
7  * Paul Mackerras, August 1996.
8  * Copyright (C) 1996 Paul Mackerras.
9  *
10  * Apr. 21 2002 - BenH Rework bus reset code for new error handler
11  * Add delay after initial bus reset
12  * Add module parameters
13  *
14  * Sep. 27 2003 - BenH Move to new driver model, fix some write posting
15  * issues
16  * To do:
17  * - handle aborts correctly
18  * - retry arbitration if lost (unless higher levels do this for us)
19  * - power down the chip when no device is detected
20  */
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/blkdev.h>
27 #include <linux/proc_fs.h>
28 #include <linux/stat.h>
29 #include <linux/interrupt.h>
30 #include <linux/reboot.h>
31 #include <linux/spinlock.h>
32 #include <asm/dbdma.h>
33 #include <asm/io.h>
34 #include <asm/pgtable.h>
35 #include <asm/prom.h>
36 #include <asm/irq.h>
37 #include <asm/hydra.h>
38 #include <asm/processor.h>
39 #include <asm/machdep.h>
40 #include <asm/pmac_feature.h>
41 #include <asm/pci-bridge.h>
42 #include <asm/macio.h>
43 
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_host.h>
48 
49 #include "mesh.h"
50 
51 #if 1
52 #undef KERN_DEBUG
53 #define KERN_DEBUG KERN_WARNING
54 #endif
55 
56 MODULE_AUTHOR("Paul Mackerras ([email protected])");
57 MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
58 MODULE_LICENSE("GPL");
59 
60 static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
61 static int sync_targets = 0xff;
62 static int resel_targets = 0xff;
63 static int debug_targets = 0; /* print debug for these targets */
64 static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;
65 
66 module_param(sync_rate, int, 0);
67 MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)");
68 module_param(sync_targets, int, 0);
69 MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous");
70 module_param(resel_targets, int, 0);
71 MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");
72 module_param(debug_targets, int, 0644);
73 MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
74 module_param(init_reset_delay, int, 0);
75 MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");
76 
77 static int mesh_sync_period = 100;
78 static int mesh_sync_offset = 0;
79 static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */
80 
81 #define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1)
82 #define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1)
83 #define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1)
84 #define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id))
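/*
 * Illustrative example (not from the original driver): the *_targets
 * module parameters are per-ID bitmasks, one bit per SCSI target ID.
 * Loading the module with debug_targets=0x09 enables debug output for
 * IDs 0 and 3 only:
 *
 *	ALLOW_DEBUG(0) == (0x09 >> 0) & 1 == 1
 *	ALLOW_DEBUG(3) == (0x09 >> 3) & 1 == 1
 *	ALLOW_DEBUG(1) == (0x09 >> 1) & 1 == 0
 *
 * Since debug_targets is registered with mode 0644, it can also be
 * changed at runtime via /sys/module/mesh/parameters/debug_targets.
 */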
85 
86 #undef MESH_DBG
87 #define N_DBG_LOG 50
88 #define N_DBG_SLOG 20
89 #define NUM_DBG_EVENTS 13
90 #undef DBG_USE_TB /* bombs on 601 */
91 
92 struct dbglog {
93  char *fmt;
94  u32 tb;
95  u8 phase;
96  u8 bs0;
97  u8 bs1;
98  u8 tgt;
99  int d;
100 };
101 
102 enum mesh_phase {
103  idle,
104  arbitrating,
105  selecting,
106  commanding,
107  dataing,
108  statusing,
109  busfreeing,
110  disconnecting,
111  reselecting,
112  sleeping
113 };
114 
115 enum msg_phase {
116  msg_none,
117  msg_out,
118  msg_out_xxx,
119  msg_out_last,
120  msg_in,
121  msg_in_bad
122 };
123 
124 enum sdtr_phase {
125  do_sdtr,
126  sdtr_sent,
127  sdtr_done
128 };
129 
130 struct mesh_target {
131  enum sdtr_phase sdtr_state;
132  int sync_params;
133  int data_goes_out; /* guess as to data direction */
134  struct scsi_cmnd *current_req;
135  u32 saved_ptr;
136 #ifdef MESH_DBG
137  int log_ix;
138  int n_log;
139  struct dbglog log[N_DBG_LOG];
140 #endif
141 };
142 
143 struct mesh_state {
144  volatile struct mesh_regs __iomem *mesh;
145  int meshintr;
146  volatile struct dbdma_regs __iomem *dma;
147  int dmaintr;
148  struct Scsi_Host *host;
149  struct mesh_state *next;
150  struct scsi_cmnd *request_q;
151  struct scsi_cmnd *request_qtail;
152  enum mesh_phase phase; /* what we're currently trying to do */
153  enum msg_phase msgphase;
154  int conn_tgt; /* target we're connected to */
155  struct scsi_cmnd *current_req; /* req we're currently working on */
156  int data_ptr;
157  int dma_started;
158  int dma_count;
159  int stat;
160  int aborting;
161  int expect_reply;
162  int n_msgin;
163  u8 msgin[16];
164  int n_msgout;
165  int last_n_msgout;
166  u8 msgout[16];
167  struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
168  dma_addr_t dma_cmd_bus;
169  void *dma_cmd_space;
170  int dma_cmd_size;
171  int clk_freq;
172  struct mesh_target tgts[8];
173  struct macio_dev *mdev;
174  struct pci_dev* pdev;
175 #ifdef MESH_DBG
176  int log_ix;
177  int n_log;
178  struct dbglog log[N_DBG_SLOG];
179 #endif
180 };
181 
182 /*
183  * Driver is too messy, we need a few prototypes...
184  */
185 static void mesh_done(struct mesh_state *ms, int start_next);
186 static void mesh_interrupt(struct mesh_state *ms);
187 static void cmd_complete(struct mesh_state *ms);
188 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
189 static void halt_dma(struct mesh_state *ms);
190 static void phase_mismatch(struct mesh_state *ms);
191 
192 
193 /*
194  * Some debugging & logging routines
195  */
196 
197 #ifdef MESH_DBG
198 
199 static inline u32 readtb(void)
200 {
201  u32 tb;
202 
203 #ifdef DBG_USE_TB
204  /* Beware: if you enable this, it will crash on 601s. */
205  asm ("mftb %0" : "=r" (tb) : );
206 #else
207  tb = 0;
208 #endif
209  return tb;
210 }
211 
212 static void dlog(struct mesh_state *ms, char *fmt, int a)
213 {
214  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
215  struct dbglog *tlp, *slp;
216 
217  tlp = &tp->log[tp->log_ix];
218  slp = &ms->log[ms->log_ix];
219  tlp->fmt = fmt;
220  tlp->tb = readtb();
221  tlp->phase = (ms->msgphase << 4) + ms->phase;
222  tlp->bs0 = ms->mesh->bus_status0;
223  tlp->bs1 = ms->mesh->bus_status1;
224  tlp->tgt = ms->conn_tgt;
225  tlp->d = a;
226  *slp = *tlp;
227  if (++tp->log_ix >= N_DBG_LOG)
228  tp->log_ix = 0;
229  if (tp->n_log < N_DBG_LOG)
230  ++tp->n_log;
231  if (++ms->log_ix >= N_DBG_SLOG)
232  ms->log_ix = 0;
233  if (ms->n_log < N_DBG_SLOG)
234  ++ms->n_log;
235 }
236 
237 static void dumplog(struct mesh_state *ms, int t)
238 {
239  struct mesh_target *tp = &ms->tgts[t];
240  struct dbglog *lp;
241  int i;
242 
243  if (tp->n_log == 0)
244  return;
245  i = tp->log_ix - tp->n_log;
246  if (i < 0)
247  i += N_DBG_LOG;
248  tp->n_log = 0;
249  do {
250  lp = &tp->log[i];
251  printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
252  t, lp->bs1, lp->bs0, lp->phase);
253 #ifdef DBG_USE_TB
254  printk("tb=%10u ", lp->tb);
255 #endif
256  printk(lp->fmt, lp->d);
257  printk("\n");
258  if (++i >= N_DBG_LOG)
259  i = 0;
260  } while (i != tp->log_ix);
261 }
262 
263 static void dumpslog(struct mesh_state *ms)
264 {
265  struct dbglog *lp;
266  int i;
267 
268  if (ms->n_log == 0)
269  return;
270  i = ms->log_ix - ms->n_log;
271  if (i < 0)
272  i += N_DBG_SLOG;
273  ms->n_log = 0;
274  do {
275  lp = &ms->log[i];
276  printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
277  lp->bs1, lp->bs0, lp->phase, lp->tgt);
278 #ifdef DBG_USE_TB
279  printk("tb=%10u ", lp->tb);
280 #endif
281  printk(lp->fmt, lp->d);
282  printk("\n");
283  if (++i >= N_DBG_SLOG)
284  i = 0;
285  } while (i != ms->log_ix);
286 }
287 
288 #else
289 
290 static inline void dlog(struct mesh_state *ms, char *fmt, int a)
291 {}
292 static inline void dumplog(struct mesh_state *ms, int tgt)
293 {}
294 static inline void dumpslog(struct mesh_state *ms)
295 {}
296 
297 #endif /* MESH_DBG */
298 
299 #define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
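/*
 * Illustrative example (not from the original driver): MKWORD packs four
 * 8-bit register snapshots into one 32-bit word so a single dlog()
 * argument can carry interrupt/exception/error/fifo_count, e.g.
 *
 *	MKWORD(0x12, 0x34, 0x56, 0x78) == 0x12345678
 */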
300 
301 static void
302 mesh_dump_regs(struct mesh_state *ms)
303 {
304  volatile struct mesh_regs __iomem *mr = ms->mesh;
305  volatile struct dbdma_regs __iomem *md = ms->dma;
306  int t;
307  struct mesh_target *tp;
308 
309  printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
310  ms, mr, md);
311  printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x "
312  "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
313  (mr->count_hi << 8) + mr->count_lo, mr->sequence,
314  (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
315  mr->exception, mr->error, mr->intr_mask, mr->interrupt,
316  mr->sync_params);
317  while(in_8(&mr->fifo_count))
318  printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
319  printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n",
320  in_le32(&md->status), in_le32(&md->cmdptr));
321  printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
322  ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
323  printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n",
324  ms->dma_started, ms->dma_count, ms->n_msgout);
325  for (t = 0; t < 8; ++t) {
326  tp = &ms->tgts[t];
327  if (tp->current_req == NULL)
328  continue;
329  printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n",
330  t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
331  }
332 }
333 
334 
335 /*
336  * Flush write buffers on the bus path to the mesh
337  */
338 static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
339 {
340  (void)in_8(&mr->mesh_id);
341 }
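/*
 * Illustrative note (not from the original driver): reading back a MESH
 * register forces earlier posted writes out to the chip, so the usual
 * pattern throughout this file is
 *
 *	out_8(&mr->sequence, SEQ_ENBRESEL);
 *	mesh_flush_io(mr);
 *	udelay(1);
 *
 * i.e. write, flush the bus write buffers, then give the chip a moment
 * to act before the next register access.
 */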
342 
343 
344 /*
345  * Complete a SCSI command
346  */
347 static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
348 {
349  (*cmd->scsi_done)(cmd);
350 }
351 
352 
353 /* Called with meshinterrupt disabled, initialize the chipset
354  * and eventually do the initial bus reset. The lock must not be
355  * held since we can schedule.
356  */
357 static void mesh_init(struct mesh_state *ms)
358 {
359  volatile struct mesh_regs __iomem *mr = ms->mesh;
360  volatile struct dbdma_regs __iomem *md = ms->dma;
361 
362  mesh_flush_io(mr);
363  udelay(100);
364 
365  /* Reset controller */
366  out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
367  out_8(&mr->exception, 0xff); /* clear all exception bits */
368  out_8(&mr->error, 0xff); /* clear all error bits */
369  out_8(&mr->sequence, SEQ_RESETMESH);
370  mesh_flush_io(mr);
371  udelay(10);
372  out_8(&mr->intr_mask, 0); /* don't want interrupts yet */
373  out_8(&mr->source_id, ms->host->this_id);
374  out_8(&mr->sel_timeout, 25); /* 250ms */
375  out_8(&mr->sync_params, ASYNC_PARAMS);
376 
377  if (init_reset_delay) {
378  printk(KERN_INFO "mesh: performing initial bus reset...\n");
379 
380  /* Reset bus */
381  out_8(&mr->bus_status1, BS1_RST); /* assert RST */
382  mesh_flush_io(mr);
383  udelay(30); /* leave it on for >= 25us */
384  out_8(&mr->bus_status1, 0); /* negate RST */
385  mesh_flush_io(mr);
386 
387  /* Wait for bus to come back */
388  msleep(init_reset_delay);
389  }
390 
391  /* Reconfigure controller */
392  out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */
393  out_8(&mr->sequence, SEQ_FLUSHFIFO);
394  mesh_flush_io(mr);
395  udelay(1);
396  out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
397  out_8(&mr->sequence, SEQ_ENBRESEL);
398 
399  ms->phase = idle;
400  ms->msgphase = msg_none;
401 }
402 
403 
404 static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
405 {
406  volatile struct mesh_regs __iomem *mr = ms->mesh;
407  int t, id;
408 
409  id = cmd->device->id;
410  ms->current_req = cmd;
411  ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
412  ms->tgts[id].current_req = cmd;
413 
414 #if 1
415  if (DEBUG_TARGET(cmd)) {
416  int i;
417  printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
418  for (i = 0; i < cmd->cmd_len; ++i)
419  printk(" %x", cmd->cmnd[i]);
420  printk(" use_sg=%d buffer=%p bufflen=%u\n",
421  scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
422  }
423 #endif
424  if (ms->dma_started)
425  panic("mesh: double DMA start !\n");
426 
427  ms->phase = arbitrating;
428  ms->msgphase = msg_none;
429  ms->data_ptr = 0;
430  ms->dma_started = 0;
431  ms->n_msgout = 0;
432  ms->last_n_msgout = 0;
433  ms->expect_reply = 0;
434  ms->conn_tgt = id;
435  ms->tgts[id].saved_ptr = 0;
436  ms->stat = DID_OK;
437  ms->aborting = 0;
438 #ifdef MESH_DBG
439  ms->tgts[id].n_log = 0;
440  dlog(ms, "start cmd=%x", (int) cmd);
441 #endif
442 
443  /* Off we go */
444  dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
445  MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
446  out_8(&mr->interrupt, INT_CMDDONE);
447  out_8(&mr->sequence, SEQ_ENBRESEL);
448  mesh_flush_io(mr);
449  udelay(1);
450 
451  if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
452  /*
453  * Some other device has the bus or is arbitrating for it -
454  * probably a target which is about to reselect us.
455  */
456  dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
457  MKWORD(mr->interrupt, mr->exception,
458  mr->error, mr->fifo_count));
459  for (t = 100; t > 0; --t) {
460  if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
461  break;
462  if (in_8(&mr->interrupt) != 0) {
463  dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
464  MKWORD(mr->interrupt, mr->exception,
465  mr->error, mr->fifo_count));
466  mesh_interrupt(ms);
467  if (ms->phase != arbitrating)
468  return;
469  }
470  udelay(1);
471  }
472  if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
473  /* XXX should try again in a little while */
474  ms->stat = DID_BUS_BUSY;
475  ms->phase = idle;
476  mesh_done(ms, 0);
477  return;
478  }
479  }
480 
481  /*
482  * Apparently the mesh has a bug where it will assert both its
483  * own bit and the target's bit on the bus during arbitration.
484  */
485  out_8(&mr->dest_id, mr->source_id);
486 
487  /*
488  * There appears to be a race with reselection sometimes,
489  * where a target reselects us just as we issue the
490  * arbitrate command. It seems that then the arbitrate
491  * command just hangs waiting for the bus to be free
492  * without giving us a reselection exception.
493  * The only way I have found to get it to respond correctly
494  * is this: disable reselection before issuing the arbitrate
495  * command, then after issuing it, if it looks like a target
496  * is trying to reselect us, reset the mesh and then enable
497  * reselection.
498  */
499  out_8(&mr->sequence, SEQ_DISRESEL);
500  if (in_8(&mr->interrupt) != 0) {
501  dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
502  MKWORD(mr->interrupt, mr->exception,
503  mr->error, mr->fifo_count));
504  mesh_interrupt(ms);
505  if (ms->phase != arbitrating)
506  return;
507  dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
508  MKWORD(mr->interrupt, mr->exception,
509  mr->error, mr->fifo_count));
510  }
511 
512  out_8(&mr->sequence, SEQ_ARBITRATE);
513 
514  for (t = 230; t > 0; --t) {
515  if (in_8(&mr->interrupt) != 0)
516  break;
517  udelay(1);
518  }
519  dlog(ms, "after arb, intr/exc/err/fc=%.8x",
520  MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
521  if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
522  && (in_8(&mr->bus_status0) & BS0_IO)) {
523  /* looks like a reselection - try resetting the mesh */
524  dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
525  MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
526  out_8(&mr->sequence, SEQ_RESETMESH);
527  mesh_flush_io(mr);
528  udelay(10);
529  out_8(&mr->exception, 0xff);
530  out_8(&mr->interrupt, 0xff);
531  out_8(&mr->sequence, SEQ_ENBRESEL);
532  mesh_flush_io(mr);
533  for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
534  udelay(1);
535  dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
536  MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
537 #ifndef MESH_MULTIPLE_HOSTS
538  if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
539  && (in_8(&mr->bus_status0) & BS0_IO)) {
540  printk(KERN_ERR "mesh: controller not responding"
541  " to reselection!\n");
542  /*
543  * If this is a target reselecting us, and the
544  * mesh isn't responding, the higher levels of
545  * the scsi code will eventually time out and
546  * reset the bus.
547  */
548  }
549 #endif
550  }
551 }
552 
553 /*
554  * Start the next command for a MESH.
555  * Should be called with interrupts disabled.
556  */
557 static void mesh_start(struct mesh_state *ms)
558 {
559  struct scsi_cmnd *cmd, *prev, *next;
560 
561  if (ms->phase != idle || ms->current_req != NULL) {
562  printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
563  ms->phase, ms);
564  return;
565  }
566 
567  while (ms->phase == idle) {
568  prev = NULL;
569  for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
570  if (cmd == NULL)
571  return;
572  if (ms->tgts[cmd->device->id].current_req == NULL)
573  break;
574  prev = cmd;
575  }
576  next = (struct scsi_cmnd *) cmd->host_scribble;
577  if (prev == NULL)
578  ms->request_q = next;
579  else
580  prev->host_scribble = (void *) next;
581  if (next == NULL)
582  ms->request_qtail = prev;
583 
584  mesh_start_cmd(ms, cmd);
585  }
586 }
587 
588 static void mesh_done(struct mesh_state *ms, int start_next)
589 {
590  struct scsi_cmnd *cmd;
591  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
592 
593  cmd = ms->current_req;
594  ms->current_req = NULL;
595  tp->current_req = NULL;
596  if (cmd) {
597  cmd->result = (ms->stat << 16) + cmd->SCp.Status;
598  if (ms->stat == DID_OK)
599  cmd->result += (cmd->SCp.Message << 8);
600  if (DEBUG_TARGET(cmd)) {
601  printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
602  cmd->result, ms->data_ptr, scsi_bufflen(cmd));
603 #if 0
604  /* needs to use sg? */
605  if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
606  && cmd->request_buffer != 0) {
607  unsigned char *b = cmd->request_buffer;
608  printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
609  b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
610  }
611 #endif
612  }
613  cmd->SCp.this_residual -= ms->data_ptr;
614  mesh_completed(ms, cmd);
615  }
616  if (start_next) {
617  out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
618  mesh_flush_io(ms->mesh);
619  udelay(1);
620  ms->phase = idle;
621  mesh_start(ms);
622  }
623 }
624 
625 static inline void add_sdtr_msg(struct mesh_state *ms)
626 {
627  int i = ms->n_msgout;
628 
629  ms->msgout[i] = EXTENDED_MESSAGE;
630  ms->msgout[i+1] = 3;
631  ms->msgout[i+2] = EXTENDED_SDTR;
632  ms->msgout[i+3] = mesh_sync_period/4;
633  ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
634  ms->n_msgout = i + 5;
635 }
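/*
 * Illustrative example (not from the original driver): with sync_rate=5,
 * init_mesh() sets mesh_sync_period=200 (ns) and mesh_sync_offset=15, so
 * for a target with ALLOW_SYNC() true this appends the five bytes
 *
 *	0x01 0x03 0x01 0x32 0x0f
 *
 * i.e. EXTENDED_MESSAGE, length 3, EXTENDED_SDTR, period factor 50
 * (50 * 4ns = 200ns), offset 15.
 */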
636 
637 static void set_sdtr(struct mesh_state *ms, int period, int offset)
638 {
639  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
640  volatile struct mesh_regs __iomem *mr = ms->mesh;
641  int v, tr;
642 
643  tp->sdtr_state = sdtr_done;
644  if (offset == 0) {
645  /* asynchronous */
646  if (SYNC_OFF(tp->sync_params))
647  printk(KERN_INFO "mesh: target %d now asynchronous\n",
648  ms->conn_tgt);
649  tp->sync_params = ASYNC_PARAMS;
650  out_8(&mr->sync_params, ASYNC_PARAMS);
651  return;
652  }
653  /*
654  * We need to compute ceil(clk_freq * period / 500e6) - 2
655  * without incurring overflow.
656  */
657  v = (ms->clk_freq / 5000) * period;
658  if (v <= 250000) {
659  /* special case: sync_period == 5 * clk_period */
660  v = 0;
661  /* units of tr are 100kB/s */
662  tr = (ms->clk_freq + 250000) / 500000;
663  } else {
664  /* sync_period == (v + 2) * 2 * clk_period */
665  v = (v + 99999) / 100000 - 2;
666  if (v > 15)
667  v = 15; /* oops */
668  tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
669  }
670  if (offset > 15)
671  offset = 15; /* can't happen */
672  tp->sync_params = SYNC_PARAMS(offset, v);
673  out_8(&mr->sync_params, tp->sync_params);
674  printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n",
675  ms->conn_tgt, tr/10, tr%10);
676 }
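/*
 * Worked example (not from the original driver), assuming the usual
 * 50MHz MESH clock (ms->clk_freq == 50000000, so clk_freq/5000 == 10000):
 *
 *	period factor 25 (100ns): v = 10000*25 = 250000 -> special case,
 *		v = 0, tr = 50250000/500000 = 100  -> "10.0 MB/s"
 *	period factor 50 (200ns): v = 10000*50 = 500000,
 *		v = 599999/100000 - 2 = 3,
 *		tr = (50000000/5 + 199999)/200000 = 50  -> "5.0 MB/s"
 *
 * The divider v written to sync_params gives sync_period = (v+2) * 2
 * clock periods, i.e. 10 * 20ns = 200ns for v = 3.
 */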
677 
678 static void start_phase(struct mesh_state *ms)
679 {
680  int i, seq, nb;
681  volatile struct mesh_regs __iomem *mr = ms->mesh;
682  volatile struct dbdma_regs __iomem *md = ms->dma;
683  struct scsi_cmnd *cmd = ms->current_req;
684  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
685 
686  dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
687  MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
689  seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
690  switch (ms->msgphase) {
691  case msg_none:
692  break;
693 
694  case msg_in:
695  out_8(&mr->count_hi, 0);
696  out_8(&mr->count_lo, 1);
697  out_8(&mr->sequence, SEQ_MSGIN + seq);
698  ms->n_msgin = 0;
699  return;
700 
701  case msg_out:
702  /*
703  * To make sure ATN drops before we assert ACK for
704  * the last byte of the message, we have to do the
705  * last byte specially.
706  */
707  if (ms->n_msgout <= 0) {
708  printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n",
709  ms->n_msgout);
710  mesh_dump_regs(ms);
711  ms->msgphase = msg_none;
712  break;
713  }
714  if (ALLOW_DEBUG(ms->conn_tgt)) {
715  printk(KERN_DEBUG "mesh: sending %d msg bytes:",
716  ms->n_msgout);
717  for (i = 0; i < ms->n_msgout; ++i)
718  printk(" %x", ms->msgout[i]);
719  printk("\n");
720  }
721  dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
722  ms->msgout[1], ms->msgout[2]));
723  out_8(&mr->count_hi, 0);
724  out_8(&mr->sequence, SEQ_FLUSHFIFO);
725  mesh_flush_io(mr);
726  udelay(1);
727  /*
728  * If ATN is not already asserted, we assert it, then
729  * issue a SEQ_MSGOUT to get the mesh to drop ACK.
730  */
731  if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
732  dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
733  out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
734  mesh_flush_io(mr);
735  udelay(1);
736  out_8(&mr->count_lo, 1);
737  out_8(&mr->sequence, SEQ_MSGOUT + seq);
738  out_8(&mr->bus_status0, 0); /* release explicit ATN */
739  dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
740  }
741  if (ms->n_msgout == 1) {
742  /*
743  * We can't issue the SEQ_MSGOUT without ATN
744  * until the target has asserted REQ. The logic
745  * in cmd_complete handles both situations:
746  * REQ already asserted or not.
747  */
748  cmd_complete(ms);
749  } else {
750  out_8(&mr->count_lo, ms->n_msgout - 1);
751  out_8(&mr->sequence, SEQ_MSGOUT + seq);
752  for (i = 0; i < ms->n_msgout - 1; ++i)
753  out_8(&mr->fifo, ms->msgout[i]);
754  }
755  return;
756 
757  default:
758  printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n",
759  ms->msgphase);
760  }
761 
762  switch (ms->phase) {
763  case selecting:
764  out_8(&mr->dest_id, ms->conn_tgt);
765  out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN);
766  break;
767  case commanding:
768  out_8(&mr->sync_params, tp->sync_params);
769  out_8(&mr->count_hi, 0);
770  if (cmd) {
771  out_8(&mr->count_lo, cmd->cmd_len);
772  out_8(&mr->sequence, SEQ_COMMAND + seq);
773  for (i = 0; i < cmd->cmd_len; ++i)
774  out_8(&mr->fifo, cmd->cmnd[i]);
775  } else {
776  out_8(&mr->count_lo, 6);
777  out_8(&mr->sequence, SEQ_COMMAND + seq);
778  for (i = 0; i < 6; ++i)
779  out_8(&mr->fifo, 0);
780  }
781  break;
782  case dataing:
783  /* transfer data, if any */
784  if (!ms->dma_started) {
785  set_dma_cmds(ms, cmd);
786  out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
787  out_le32(&md->control, (RUN << 16) | RUN);
788  ms->dma_started = 1;
789  }
790  nb = ms->dma_count;
791  if (nb > 0xfff0)
792  nb = 0xfff0;
793  ms->dma_count -= nb;
794  ms->data_ptr += nb;
795  out_8(&mr->count_lo, nb);
796  out_8(&mr->count_hi, nb >> 8);
797  out_8(&mr->sequence, (tp->data_goes_out?
798  SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq);
799  break;
800  case statusing:
801  out_8(&mr->count_hi, 0);
802  out_8(&mr->count_lo, 1);
803  out_8(&mr->sequence, SEQ_STATUS + seq);
804  break;
805  case busfreeing:
806  case disconnecting:
807  out_8(&mr->sequence, SEQ_ENBRESEL);
808  mesh_flush_io(mr);
809  udelay(1);
810  dlog(ms, "enbresel intr/exc/err/fc=%.8x",
811  MKWORD(mr->interrupt, mr->exception, mr->error,
812  mr->fifo_count));
813  out_8(&mr->sequence, SEQ_BUSFREE);
814  break;
815  default:
816  printk(KERN_ERR "mesh: start_phase called with phase=%d\n",
817  ms->phase);
818  dumpslog(ms);
819  }
820 
821 }
822 
823 static inline void get_msgin(struct mesh_state *ms)
824 {
825  volatile struct mesh_regs __iomem *mr = ms->mesh;
826  int i, n;
827 
828  n = mr->fifo_count;
829  if (n != 0) {
830  i = ms->n_msgin;
831  ms->n_msgin = i + n;
832  for (; n > 0; --n)
833  ms->msgin[i++] = in_8(&mr->fifo);
834  }
835 }
836 
837 static inline int msgin_length(struct mesh_state *ms)
838 {
839  int b, n;
840 
841  n = 1;
842  if (ms->n_msgin > 0) {
843  b = ms->msgin[0];
844  if (b == 1) {
845  /* extended message */
846  n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
847  } else if (0x20 <= b && b <= 0x2f) {
848  /* 2-byte message */
849  n = 2;
850  }
851  }
852  return n;
853 }
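/*
 * Illustrative example (not from the original driver): for an incoming
 * SDTR 01 03 01 32 0f the first byte is EXTENDED_MESSAGE, so once two
 * bytes have arrived msgin_length() returns msgin[1] + 2 = 5; a code in
 * the two-byte range 0x20-0x2f returns 2; any other single byte (e.g.
 * DISCONNECT, 0x04) returns 1.
 */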
854 
855 static void reselected(struct mesh_state *ms)
856 {
857  volatile struct mesh_regs __iomem *mr = ms->mesh;
858  struct scsi_cmnd *cmd;
859  struct mesh_target *tp;
860  int b, t, prev;
861 
862  switch (ms->phase) {
863  case idle:
864  break;
865  case arbitrating:
866  if ((cmd = ms->current_req) != NULL) {
867  /* put the command back on the queue */
868  cmd->host_scribble = (void *) ms->request_q;
869  if (ms->request_q == NULL)
870  ms->request_qtail = cmd;
871  ms->request_q = cmd;
872  tp = &ms->tgts[cmd->device->id];
873  tp->current_req = NULL;
874  }
875  break;
876  case busfreeing:
877  ms->phase = reselecting;
878  mesh_done(ms, 0);
879  break;
880  case disconnecting:
881  break;
882  default:
883  printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n",
884  ms->msgphase, ms->phase, ms->conn_tgt);
885  dumplog(ms, ms->conn_tgt);
886  dumpslog(ms);
887  }
888 
889  if (ms->dma_started) {
890  printk(KERN_ERR "mesh: reselected with DMA started !\n");
891  halt_dma(ms);
892  }
893  ms->current_req = NULL;
894  ms->phase = dataing;
895  ms->msgphase = msg_in;
896  ms->n_msgout = 0;
897  ms->last_n_msgout = 0;
898  prev = ms->conn_tgt;
899 
900  /*
901  * We seem to get abortive reselections sometimes.
902  */
903  while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) {
904  static int mesh_aborted_resels;
905  mesh_aborted_resels++;
906  out_8(&mr->sequence, SEQ_DISRESEL);
907  mesh_flush_io(mr);
908  udelay(1);
909  out_8(&mr->sequence, SEQ_ENBRESEL);
910  mesh_flush_io(mr);
911  udelay(5);
912  dlog(ms, "extra resel err/exc/fc = %.6x",
913  MKWORD(0, mr->error, mr->exception, mr->fifo_count));
914  }
915  out_8(&mr->sequence, SEQ_DISRESEL);
916  mesh_flush_io(mr);
917  udelay(1);
918  out_8(&mr->sequence, SEQ_ENBRESEL);
919  mesh_flush_io(mr);
920  udelay(1);
922 
923  /*
924  * Find out who reselected us.
925  */
926  if (in_8(&mr->fifo_count) == 0) {
927  printk(KERN_ERR "mesh: reselection but nothing in fifo?\n");
928  ms->conn_tgt = ms->host->this_id;
929  goto bogus;
930  }
931  /* get the last byte in the fifo */
932  do {
933  b = in_8(&mr->fifo);
934  dlog(ms, "reseldata %x", b);
935  } while (in_8(&mr->fifo_count));
936  for (t = 0; t < 8; ++t)
937  if ((b & (1 << t)) != 0 && t != ms->host->this_id)
938  break;
939  if (b != (1 << t) + (1 << ms->host->this_id)) {
940  printk(KERN_ERR "mesh: bad reselection data %x\n", b);
941  ms->conn_tgt = ms->host->this_id;
942  goto bogus;
943  }
944 
945 
946  /*
947  * Set up to continue with that target's transfer.
948  */
949  ms->conn_tgt = t;
950  tp = &ms->tgts[t];
951  out_8(&mr->sync_params, tp->sync_params);
952  if (ALLOW_DEBUG(t)) {
953  printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
954  printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n",
955  tp->saved_ptr, tp->data_goes_out, tp->current_req);
956  }
957  ms->current_req = tp->current_req;
958  if (tp->current_req == NULL) {
959  printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
960  goto bogus;
961  }
962  ms->data_ptr = tp->saved_ptr;
963  dlog(ms, "resel prev tgt=%d", prev);
964  dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
965  start_phase(ms);
966  return;
967 
968 bogus:
969  dumplog(ms, ms->conn_tgt);
970  dumpslog(ms);
971  ms->data_ptr = 0;
972  ms->aborting = 1;
973  start_phase(ms);
974 }
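/*
 * Illustrative example (not from the original driver): during reselection
 * the data bus carries exactly two ID bits, the reselecting target's and
 * our own.  With this_id == 7, a fifo byte of 0x88 decodes as
 *
 *	0x88 == (1 << 3) + (1 << 7)  ->  reselected by target 3
 *
 * any other combination of bits is rejected above as bad reselection data.
 */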
975 
976 static void do_abort(struct mesh_state *ms)
977 {
978  ms->msgout[0] = ABORT;
979  ms->n_msgout = 1;
980  ms->aborting = 1;
981  ms->stat = DID_ABORT;
982  dlog(ms, "abort", 0);
983 }
984 
985 static void handle_reset(struct mesh_state *ms)
986 {
987  int tgt;
988  struct mesh_target *tp;
989  struct scsi_cmnd *cmd;
990  volatile struct mesh_regs __iomem *mr = ms->mesh;
991 
992  for (tgt = 0; tgt < 8; ++tgt) {
993  tp = &ms->tgts[tgt];
994  if ((cmd = tp->current_req) != NULL) {
995  cmd->result = DID_RESET << 16;
996  tp->current_req = NULL;
997  mesh_completed(ms, cmd);
998  }
999  ms->tgts[tgt].sdtr_state = do_sdtr;
1000  ms->tgts[tgt].sync_params = ASYNC_PARAMS;
1001  }
1002  ms->current_req = NULL;
1003  while ((cmd = ms->request_q) != NULL) {
1004  ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
1005  cmd->result = DID_RESET << 16;
1006  mesh_completed(ms, cmd);
1007  }
1008  ms->phase = idle;
1009  ms->msgphase = msg_none;
1010  out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1011  out_8(&mr->sequence, SEQ_FLUSHFIFO);
1012  mesh_flush_io(mr);
1013  udelay(1);
1014  out_8(&mr->sync_params, ASYNC_PARAMS);
1015  out_8(&mr->sequence, SEQ_ENBRESEL);
1016 }
1017 
1018 static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
1019 {
1020  unsigned long flags;
1021  struct mesh_state *ms = dev_id;
1022  struct Scsi_Host *dev = ms->host;
1023 
1024  spin_lock_irqsave(dev->host_lock, flags);
1025  mesh_interrupt(ms);
1026  spin_unlock_irqrestore(dev->host_lock, flags);
1027  return IRQ_HANDLED;
1028 }
1029 
1030 static void handle_error(struct mesh_state *ms)
1031 {
1032  int err, exc, count;
1033  volatile struct mesh_regs __iomem *mr = ms->mesh;
1034 
1035  err = in_8(&mr->error);
1036  exc = in_8(&mr->exception);
1037  out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1038  dlog(ms, "error err/exc/fc/cl=%.8x",
1039  MKWORD(err, exc, mr->fifo_count, mr->count_lo));
1040  if (err & ERR_SCSIRESET) {
1041  /* SCSI bus was reset */
1042  printk(KERN_INFO "mesh: SCSI bus reset detected: "
1043  "waiting for end...");
1044  while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
1045  udelay(1);
1046  printk("done\n");
1047  handle_reset(ms);
1048  /* request_q is empty, no point in mesh_start() */
1049  return;
1050  }
1051  if (err & ERR_UNEXPDISC) {
1052  /* Unexpected disconnect */
1053  if (exc & EXC_RESELECTED) {
1054  reselected(ms);
1055  return;
1056  }
1057  if (!ms->aborting) {
1058  printk(KERN_WARNING "mesh: target %d aborted\n",
1059  ms->conn_tgt);
1060  dumplog(ms, ms->conn_tgt);
1061  dumpslog(ms);
1062  }
1063  out_8(&mr->interrupt, INT_CMDDONE);
1064  ms->stat = DID_ABORT;
1065  mesh_done(ms, 1);
1066  return;
1067  }
1068  if (err & ERR_PARITY) {
1069  if (ms->msgphase == msg_in) {
1070  printk(KERN_ERR "mesh: msg parity error, target %d\n",
1071  ms->conn_tgt);
1072  ms->msgout[0] = MSG_PARITY_ERROR;
1073  ms->n_msgout = 1;
1074  ms->msgphase = msg_in_bad;
1075  cmd_complete(ms);
1076  return;
1077  }
1078  if (ms->stat == DID_OK) {
1079  printk(KERN_ERR "mesh: parity error, target %d\n",
1080  ms->conn_tgt);
1081  ms->stat = DID_PARITY;
1082  }
1083  count = (mr->count_hi << 8) + mr->count_lo;
1084  if (count == 0) {
1085  cmd_complete(ms);
1086  } else {
1087  /* reissue the data transfer command */
1088  out_8(&mr->sequence, mr->sequence);
1089  }
1090  return;
1091  }
1092  if (err & ERR_SEQERR) {
1093  if (exc & EXC_RESELECTED) {
1094  /* This can happen if we issue a command to
1095  get the bus just after the target reselects us. */
1096  static int mesh_resel_seqerr;
1097  mesh_resel_seqerr++;
1098  reselected(ms);
1099  return;
1100  }
1101  if (exc == EXC_PHASEMM) {
1102  static int mesh_phasemm_seqerr;
1103  mesh_phasemm_seqerr++;
1104  phase_mismatch(ms);
1105  return;
1106  }
1107  printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n",
1108  err, exc);
1109  } else {
1110  printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc);
1111  }
1112  mesh_dump_regs(ms);
1113  dumplog(ms, ms->conn_tgt);
1114  if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
1115  /* try to do what the target wants */
1116  do_abort(ms);
1117  phase_mismatch(ms);
1118  return;
1119  }
1120  ms->stat = DID_ERROR;
1121  mesh_done(ms, 1);
1122 }
1123 
1124 static void handle_exception(struct mesh_state *ms)
1125 {
1126  int exc;
1127  volatile struct mesh_regs __iomem *mr = ms->mesh;
1128 
1129  exc = in_8(&mr->exception);
1130  out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE);
1131  if (exc & EXC_RESELECTED) {
1132  static int mesh_resel_exc;
1133  mesh_resel_exc++;
1134  reselected(ms);
1135  } else if (exc == EXC_ARBLOST) {
1136  printk(KERN_DEBUG "mesh: lost arbitration\n");
1137  ms->stat = DID_BUS_BUSY;
1138  mesh_done(ms, 1);
1139  } else if (exc == EXC_SELTO) {
1140  /* selection timed out */
1141  ms->stat = DID_BAD_TARGET;
1142  mesh_done(ms, 1);
1143  } else if (exc == EXC_PHASEMM) {
1144  /* target wants to do something different:
1145  find out what it wants and do it. */
1146  phase_mismatch(ms);
1147  } else {
1148  printk(KERN_ERR "mesh: can't cope with exception %x\n", exc);
1149  mesh_dump_regs(ms);
1150  dumplog(ms, ms->conn_tgt);
1151  do_abort(ms);
1152  phase_mismatch(ms);
1153  }
1154 }
1155 
1156 static void handle_msgin(struct mesh_state *ms)
1157 {
1158  int i, code;
1159  struct scsi_cmnd *cmd = ms->current_req;
1160  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1161 
1162  if (ms->n_msgin == 0)
1163  return;
1164  code = ms->msgin[0];
1165  if (ALLOW_DEBUG(ms->conn_tgt)) {
1166  printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
1167  for (i = 0; i < ms->n_msgin; ++i)
1168  printk(" %x", ms->msgin[i]);
1169  printk("\n");
1170  }
1171  dlog(ms, "msgin msg=%.8x",
1172  MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
1173 
1174  ms->expect_reply = 0;
1175  ms->n_msgout = 0;
1176  if (ms->n_msgin < msgin_length(ms))
1177  goto reject;
1178  if (cmd)
1179  cmd->SCp.Message = code;
1180  switch (code) {
1181  case COMMAND_COMPLETE:
1182  break;
1183  case EXTENDED_MESSAGE:
1184  switch (ms->msgin[2]) {
1185  case EXTENDED_MODIFY_DATA_POINTER:
1186  ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
1187  + (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
1188  break;
1189  case EXTENDED_SDTR:
1190  if (tp->sdtr_state != sdtr_sent) {
1191  /* reply with an SDTR */
1192  add_sdtr_msg(ms);
1193  /* limit period to at least his value,
1194  offset to no more than his */
1195  if (ms->msgout[3] < ms->msgin[3])
1196  ms->msgout[3] = ms->msgin[3];
1197  if (ms->msgout[4] > ms->msgin[4])
1198  ms->msgout[4] = ms->msgin[4];
1199  set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
1200  ms->msgphase = msg_out;
1201  } else {
1202  set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
1203  }
1204  break;
1205  default:
1206  goto reject;
1207  }
1208  break;
1209  case SAVE_POINTERS:
1210  tp->saved_ptr = ms->data_ptr;
1211  break;
1212  case RESTORE_POINTERS:
1213  ms->data_ptr = tp->saved_ptr;
1214  break;
1215  case DISCONNECT:
1216  ms->phase = disconnecting;
1217  break;
1218  case ABORT:
1219  break;
1220  case MESSAGE_REJECT:
1221  if (tp->sdtr_state == sdtr_sent)
1222  set_sdtr(ms, 0, 0);
1223  break;
1224  case NOP:
1225  break;
1226  default:
1227  if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) {
1228  if (cmd == NULL) {
1229  do_abort(ms);
1230  ms->msgphase = msg_out;
1231  } else if (code != cmd->device->lun + IDENTIFY_BASE) {
1232  printk(KERN_WARNING "mesh: lun mismatch "
1233  "(%d != %d) on reselection from "
1234  "target %d\n", code - IDENTIFY_BASE,
1235  cmd->device->lun, ms->conn_tgt);
1236  }
1237  break;
1238  }
1239  goto reject;
1240  }
1241  return;
1242 
1243  reject:
1244  printk(KERN_WARNING "mesh: rejecting message from target %d:",
1245  ms->conn_tgt);
1246  for (i = 0; i < ms->n_msgin; ++i)
1247  printk(" %x", ms->msgin[i]);
1248  printk("\n");
1249  ms->msgout[0] = MESSAGE_REJECT;
1250  ms->n_msgout = 1;
1251  ms->msgphase = msg_out;
1252 }
1253 
1254 /*
1255  * Set up DMA commands for transferring data.
1256  */
1257 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1258 {
1259  int i, dma_cmd, total, off, dtot;
1260  struct scatterlist *scl;
1261  struct dbdma_cmd *dcmds;
1262 
1263  dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
1264  OUTPUT_MORE: INPUT_MORE;
1265  dcmds = ms->dma_cmds;
1266  dtot = 0;
1267  if (cmd) {
1268  int nseg;
1269 
1270  cmd->SCp.this_residual = scsi_bufflen(cmd);
1271 
1272  nseg = scsi_dma_map(cmd);
1273  BUG_ON(nseg < 0);
1274 
1275  if (nseg) {
1276  total = 0;
1277  off = ms->data_ptr;
1278 
1279  scsi_for_each_sg(cmd, scl, nseg, i) {
1280  u32 dma_addr = sg_dma_address(scl);
1281  u32 dma_len = sg_dma_len(scl);
1282 
1283  total += scl->length;
1284  if (off >= dma_len) {
1285  off -= dma_len;
1286  continue;
1287  }
1288  if (dma_len > 0xffff)
1289  panic("mesh: scatterlist element >= 64k");
1290  st_le16(&dcmds->req_count, dma_len - off);
1291  st_le16(&dcmds->command, dma_cmd);
1292  st_le32(&dcmds->phy_addr, dma_addr + off);
1293  dcmds->xfer_status = 0;
1294  ++dcmds;
1295  dtot += dma_len - off;
1296  off = 0;
1297  }
1298  }
1299  }
1300  if (dtot == 0) {
1301  /* Either the target has overrun our buffer,
1302  or the caller didn't provide a buffer. */
1303  static char mesh_extra_buf[64];
1304 
1305  dtot = sizeof(mesh_extra_buf);
1306  st_le16(&dcmds->req_count, dtot);
1307  st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf));
1308  dcmds->xfer_status = 0;
1309  ++dcmds;
1310  }
1311  dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
1312  st_le16(&dcmds[-1].command, dma_cmd);
1313  memset(dcmds, 0, sizeof(*dcmds));
1314  st_le16(&dcmds->command, DBDMA_STOP);
1315  ms->dma_count = dtot;
1316 }
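/*
 * Illustrative sketch (not from the original driver) of the list built
 * above for a data-in command with two scatter/gather segments of 0x1000
 * bytes each and data_ptr == 0:
 *
 *	dma_cmds[0]: INPUT_MORE, req_count 0x1000, phy_addr of segment 0
 *	dma_cmds[1]: INPUT_LAST, req_count 0x1000, phy_addr of segment 1
 *	dma_cmds[2]: DBDMA_STOP
 *
 * The final transfer descriptor is promoted from *_MORE to *_LAST
 * (OUTPUT_LAST - OUTPUT_MORE equals INPUT_LAST - INPUT_MORE), and
 * ms->dma_count ends up as the total byte count, 0x2000 here.
 */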
1317 
1318 static void halt_dma(struct mesh_state *ms)
1319 {
1320  volatile struct dbdma_regs __iomem *md = ms->dma;
1321  volatile struct mesh_regs __iomem *mr = ms->mesh;
1322  struct scsi_cmnd *cmd = ms->current_req;
1323  int t, nb;
1324 
1325  if (!ms->tgts[ms->conn_tgt].data_goes_out) {
1326  /* wait a little while until the fifo drains */
1327  t = 50;
1328  while (t > 0 && in_8(&mr->fifo_count) != 0
1329  && (in_le32(&md->status) & ACTIVE) != 0) {
1330  --t;
1331  udelay(1);
1332  }
1333  }
1334  out_le32(&md->control, RUN << 16); /* turn off RUN bit */
1335  nb = (mr->count_hi << 8) + mr->count_lo;
1336  dlog(ms, "halt_dma fc/count=%.6x",
1337  MKWORD(0, mr->fifo_count, 0, nb));
1338  if (ms->tgts[ms->conn_tgt].data_goes_out)
1339  nb += mr->fifo_count;
1340  /* nb is the number of bytes not yet transferred
1341  to/from the target. */
1342  ms->data_ptr -= nb;
1343  dlog(ms, "data_ptr %x", ms->data_ptr);
1344  if (ms->data_ptr < 0) {
1345  printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
1346  ms->data_ptr, nb, ms);
1347  ms->data_ptr = 0;
1348 #ifdef MESH_DBG
1349  dumplog(ms, ms->conn_tgt);
1350  dumpslog(ms);
1351 #endif /* MESH_DBG */
1352  } else if (cmd && scsi_bufflen(cmd) &&
1353  ms->data_ptr > scsi_bufflen(cmd)) {
1354  printk(KERN_DEBUG "mesh: target %d overrun, "
1355  "data_ptr=%x total=%x goes_out=%d\n",
1356  ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
1357  ms->tgts[ms->conn_tgt].data_goes_out);
1358  }
1359  scsi_dma_unmap(cmd);
1360  ms->dma_started = 0;
1361 }
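/*
 * Illustrative note (not from the original driver) on the residual
 * arithmetic above: the MESH transfer counter holds the bytes it never
 * moved, and on a data-out phase bytes still sitting in the chip FIFO
 * never reached the target either.  So count == 0x0200 and fifo_count == 6
 * during a write gives nb == 0x206, and data_ptr is wound back by that
 * amount so a later SAVE POINTERS / reconnect resumes at the right offset.
 */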
1362 
1363 static void phase_mismatch(struct mesh_state *ms)
1364 {
1365  volatile struct mesh_regs __iomem *mr = ms->mesh;
1366  int phase;
1367 
1368  dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
1369  MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count));
1370  phase = in_8(&mr->bus_status0) & BS0_PHASE;
1371  if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
1372  /* output the last byte of the message, without ATN */
1373  out_8(&mr->count_lo, 1);
1374  out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
1375  mesh_flush_io(mr);
1376  udelay(1);
1377  out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1378  ms->msgphase = msg_out_last;
1379  return;
1380  }
1381 
1382  if (ms->msgphase == msg_in) {
1383  get_msgin(ms);
1384  if (ms->n_msgin)
1385  handle_msgin(ms);
1386  }
1387 
1388  if (ms->dma_started)
1389  halt_dma(ms);
1390  if (mr->fifo_count) {
1391  out_8(&mr->sequence, SEQ_FLUSHFIFO);
1392  mesh_flush_io(mr);
1393  udelay(1);
1394  }
1395 
1396  ms->msgphase = msg_none;
1397  switch (phase) {
1398  case BP_DATAIN:
1399  ms->tgts[ms->conn_tgt].data_goes_out = 0;
1400  ms->phase = dataing;
1401  break;
1402  case BP_DATAOUT:
1403  ms->tgts[ms->conn_tgt].data_goes_out = 1;
1404  ms->phase = dataing;
1405  break;
1406  case BP_COMMAND:
1407  ms->phase = commanding;
1408  break;
1409  case BP_STATUS:
1410  ms->phase = statusing;
1411  break;
1412  case BP_MSGIN:
1413  ms->msgphase = msg_in;
1414  ms->n_msgin = 0;
1415  break;
1416  case BP_MSGOUT:
1417  ms->msgphase = msg_out;
1418  if (ms->n_msgout == 0) {
1419  if (ms->aborting) {
1420  do_abort(ms);
1421  } else {
1422  if (ms->last_n_msgout == 0) {
1423  printk(KERN_WARNING
1424  "mesh: no msg to repeat\n");
1425  ms->msgout[0] = NOP;
1426  ms->last_n_msgout = 1;
1427  }
1428  ms->n_msgout = ms->last_n_msgout;
1429  }
1430  }
1431  break;
1432  default:
1433  printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
1434  ms->stat = DID_ERROR;
1435  mesh_done(ms, 1);
1436  return;
1437  }
1438 
1439  start_phase(ms);
1440 }
1441 
1442 static void cmd_complete(struct mesh_state *ms)
1443 {
1444  volatile struct mesh_regs __iomem *mr = ms->mesh;
1445  struct scsi_cmnd *cmd = ms->current_req;
1446  struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1447  int seq, n, t;
1448 
1449  dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
1450  seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
1451  switch (ms->msgphase) {
1452  case msg_out_xxx:
1453  /* huh? we expected a phase mismatch */
1454  ms->n_msgin = 0;
1455  ms->msgphase = msg_in;
1456  /* fall through */
1457 
1458  case msg_in:
1459  /* should have some message bytes in fifo */
1460  get_msgin(ms);
1461  n = msgin_length(ms);
1462  if (ms->n_msgin < n) {
1463  out_8(&mr->count_lo, n - ms->n_msgin);
1464  out_8(&mr->sequence, SEQ_MSGIN + seq);
1465  } else {
1466  ms->msgphase = msg_none;
1467  handle_msgin(ms);
1468  start_phase(ms);
1469  }
1470  break;
1471 
1472  case msg_in_bad:
1473  out_8(&mr->sequence, SEQ_FLUSHFIFO);
1474  mesh_flush_io(mr);
1475  udelay(1);
1476  out_8(&mr->count_lo, 1);
1477  out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
1478  break;
1479 
1480  case msg_out:
1481  /*
1482  * To get the right timing on ATN wrt ACK, we have
1483  * to get the MESH to drop ACK, wait until REQ gets
1484  * asserted, then drop ATN. To do this we first
1485  * issue a SEQ_MSGOUT with ATN and wait for REQ,
1486  * then change the command to a SEQ_MSGOUT w/o ATN.
1487  * If we don't see REQ in a reasonable time, we
1488  * change the command to SEQ_MSGIN with ATN,
1489  * wait for the phase mismatch interrupt, then
1490  * issue the SEQ_MSGOUT without ATN.
1491  */
1492  out_8(&mr->count_lo, 1);
1493  out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
1494  t = 30; /* wait up to 30us */
1495  while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
1496  udelay(1);
1497  dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
1498  MKWORD(mr->error, mr->exception,
1499  mr->fifo_count, mr->count_lo));
1500  if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
1501  /* whoops, target didn't do what we expected */
1502  ms->last_n_msgout = ms->n_msgout;
1503  ms->n_msgout = 0;
1504  if (in_8(&mr->interrupt) & INT_ERROR) {
1505  printk(KERN_ERR "mesh: error %x in msg_out\n",
1506  in_8(&mr->error));
1507  handle_error(ms);
1508  return;
1509  }
1510  if (in_8(&mr->exception) != EXC_PHASEMM)
1511  printk(KERN_ERR "mesh: exc %x in msg_out\n",
1512  in_8(&mr->exception));
1513  else
1514  printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
1515  in_8(&mr->bus_status0));
1516  handle_exception(ms);
1517  return;
1518  }
1519  if (in_8(&mr->bus_status0) & BS0_REQ) {
1520  out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
1521  mesh_flush_io(mr);
1522  udelay(1);
1523  out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1524  ms->msgphase = msg_out_last;
1525  } else {
1526  out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
1527  ms->msgphase = msg_out_xxx;
1528  }
1529  break;
1530 
1531  case msg_out_last:
1532  ms->last_n_msgout = ms->n_msgout;
1533  ms->n_msgout = 0;
1534  ms->msgphase = ms->expect_reply? msg_in: msg_none;
1535  start_phase(ms);
1536  break;
1537 
1538  case msg_none:
1539  switch (ms->phase) {
1540  case idle:
1541  printk(KERN_ERR "mesh: interrupt in idle phase?\n");
1542  dumpslog(ms);
1543  return;
1544  case selecting:
1545  dlog(ms, "Selecting phase at command completion",0);
1546  ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
1547  (cmd? cmd->device->lun: 0));
1548  ms->n_msgout = 1;
1549  ms->expect_reply = 0;
1550  if (ms->aborting) {
1551  ms->msgout[0] = ABORT;
1552  ms->n_msgout++;
1553  } else if (tp->sdtr_state == do_sdtr) {
1554  /* add SDTR message */
1555  add_sdtr_msg(ms);
1556  ms->expect_reply = 1;
1557  tp->sdtr_state = sdtr_sent;
1558  }
1559  ms->msgphase = msg_out;
1560  /*
1561  * We need to wait for REQ before dropping ATN.
1562  * We wait for at most 30us, then fall back to
1563  * a scheme where we issue a SEQ_COMMAND with ATN,
1564  * which will give us a phase mismatch interrupt
1565  * when REQ does come, and then we send the message.
1566  */
1567  t = 230; /* wait up to 230us */
1568  while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
1569  if (--t < 0) {
1570  dlog(ms, "impatient for req", ms->n_msgout);
1571  ms->msgphase = msg_none;
1572  break;
1573  }
1574  udelay(1);
1575  }
1576  break;
1577  case dataing:
1578  if (ms->dma_count != 0) {
1579  start_phase(ms);
1580  return;
1581  }
1582  /*
1583  * We can get a phase mismatch here if the target
1584  * changes to the status phase, even though we have
1585  * had a command complete interrupt. Then, if we
1586  * issue the SEQ_STATUS command, we'll get a sequence
1587  * error interrupt. Which isn't so bad except that
1588  * occasionally the mesh actually executes the
1589  * SEQ_STATUS *as well as* giving us the sequence
1590  * error and phase mismatch exception.
1591  */
1592  out_8(&mr->sequence, 0);
1593  out_8(&mr->interrupt,
1594  INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1595  halt_dma(ms);
1596  break;
1597  case statusing:
1598  if (cmd) {
1599  cmd->SCp.Status = mr->fifo;
1600  if (DEBUG_TARGET(cmd))
1601  printk(KERN_DEBUG "mesh: status is %x\n",
1602  cmd->SCp.Status);
1603  }
1604  ms->msgphase = msg_in;
1605  break;
1606  case busfreeing:
1607  mesh_done(ms, 1);
1608  return;
1609  case disconnecting:
1610  ms->current_req = NULL;
1611  ms->phase = idle;
1612  mesh_start(ms);
1613  return;
1614  default:
1615  break;
1616  }
1617  ++ms->phase;
1618  start_phase(ms);
1619  break;
1620  }
1621 }
1622 
1623 
1624 /*
1625  * Called by midlayer with host locked to queue a new
1626  * request
1627  */
1628 static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1629 {
1630  struct mesh_state *ms;
1631 
1632  cmd->scsi_done = done;
1633  cmd->host_scribble = NULL;
1634 
1635  ms = (struct mesh_state *) cmd->device->host->hostdata;
1636 
1637  if (ms->request_q == NULL)
1638  ms->request_q = cmd;
1639  else
1640  ms->request_qtail->host_scribble = (void *) cmd;
1641  ms->request_qtail = cmd;
1642 
1643  if (ms->phase == idle)
1644  mesh_start(ms);
1645 
1646  return 0;
1647 }
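/*
 * Illustrative sketch (not from the original driver): the request queue
 * is a singly linked list threaded through scsi_cmnd->host_scribble,
 *
 *	ms->request_q -> cmdA -> cmdB -> NULL
 *	                          ^
 *	                   ms->request_qtail
 *
 * mesh_start() pops the first queued command whose target has no
 * current_req, so commands for different targets can be interleaved via
 * disconnect/reselect.
 */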
1648 
1649 static DEF_SCSI_QCMD(mesh_queue)
1650 
1651 /*
1652  * Called to handle interrupts, either call by the interrupt
1653  * handler (do_mesh_interrupt) or by other functions in
1654  * exceptional circumstances
1655  */
1656 static void mesh_interrupt(struct mesh_state *ms)
1657 {
1658  volatile struct mesh_regs __iomem *mr = ms->mesh;
1659  int intr;
1660 
1661 #if 0
1662  if (ALLOW_DEBUG(ms->conn_tgt))
1663  printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x "
1664  "phase=%d msgphase=%d\n", mr->bus_status0,
1665  mr->interrupt, mr->exception, mr->error,
1666  ms->phase, ms->msgphase);
1667 #endif
1668  while ((intr = in_8(&mr->interrupt)) != 0) {
1669  dlog(ms, "interrupt intr/err/exc/seq=%.8x",
1670  MKWORD(intr, mr->error, mr->exception, mr->sequence));
1671  if (intr & INT_ERROR) {
1672  handle_error(ms);
1673  } else if (intr & INT_EXCEPTION) {
1674  handle_exception(ms);
1675  } else if (intr & INT_CMDDONE) {
1676  out_8(&mr->interrupt, INT_CMDDONE);
1677  cmd_complete(ms);
1678  }
1679  }
1680 }
1681 
1682 /* Todo: here we can at least try to remove the command from the
1683  * queue if it isn't connected yet, and for pending command, assert
1684  * ATN until the bus gets freed.
1685  */
1686 static int mesh_abort(struct scsi_cmnd *cmd)
1687 {
1688  struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1689 
1690  printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
1691  mesh_dump_regs(ms);
1692  dumplog(ms, cmd->device->id);
1693  dumpslog(ms);
1694  return FAILED;
1695 }
1696 
1697 /*
1698  * Called by the midlayer with the lock held to reset the
1699  * SCSI host and bus.
1700  * The midlayer will wait for devices to come back, we don't need
1701  * to do that ourselves
1702  */
1703 static int mesh_host_reset(struct scsi_cmnd *cmd)
1704 {
1705  struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1706  volatile struct mesh_regs __iomem *mr = ms->mesh;
1707  volatile struct dbdma_regs __iomem *md = ms->dma;
1708  unsigned long flags;
1709 
1710  printk(KERN_DEBUG "mesh_host_reset\n");
1711 
1712  spin_lock_irqsave(ms->host->host_lock, flags);
1713 
1714  /* Reset the controller & dbdma channel */
1715  out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
1716  out_8(&mr->exception, 0xff); /* clear all exception bits */
1717  out_8(&mr->error, 0xff); /* clear all error bits */
1718  out_8(&mr->sequence, SEQ_RESETMESH);
1719  mesh_flush_io(mr);
1720  udelay(1);
1721  out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1722  out_8(&mr->source_id, ms->host->this_id);
1723  out_8(&mr->sel_timeout, 25); /* 250ms */
1724  out_8(&mr->sync_params, ASYNC_PARAMS);
1725 
1726  /* Reset the bus */
1727  out_8(&mr->bus_status1, BS1_RST); /* assert RST */
1728  mesh_flush_io(mr);
1729  udelay(30); /* leave it on for >= 25us */
1730  out_8(&mr->bus_status1, 0); /* negate RST */
1731 
1732  /* Complete pending commands */
1733  handle_reset(ms);
1734 
1735  spin_unlock_irqrestore(ms->host->host_lock, flags);
1736  return SUCCESS;
1737 }
1738 
1739 static void set_mesh_power(struct mesh_state *ms, int state)
1740 {
1741  if (!machine_is(powermac))
1742  return;
1743  if (state) {
1744  pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
1745  msleep(200);
1746  } else {
1747  pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
1748  msleep(10);
1749  }
1750 }
1751 
1752 
1753 #ifdef CONFIG_PM
1754 static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1755 {
1756  struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1757  unsigned long flags;
1758 
1759  switch (mesg.event) {
1760  case PM_EVENT_SUSPEND:
1761  case PM_EVENT_HIBERNATE:
1762  case PM_EVENT_FREEZE:
1763  break;
1764  default:
1765  return 0;
1766  }
1767  if (ms->phase == sleeping)
1768  return 0;
1769 
1770  scsi_block_requests(ms->host);
1771  spin_lock_irqsave(ms->host->host_lock, flags);
1772  while(ms->phase != idle) {
1773  spin_unlock_irqrestore(ms->host->host_lock, flags);
1774  msleep(10);
1775  spin_lock_irqsave(ms->host->host_lock, flags);
1776  }
1777  ms->phase = sleeping;
1778  spin_unlock_irqrestore(ms->host->host_lock, flags);
1779  disable_irq(ms->meshintr);
1780  set_mesh_power(ms, 0);
1781 
1782  return 0;
1783 }
1784 
1785 static int mesh_resume(struct macio_dev *mdev)
1786 {
1787  struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1788  unsigned long flags;
1789 
1790  if (ms->phase != sleeping)
1791  return 0;
1792 
1793  set_mesh_power(ms, 1);
1794  mesh_init(ms);
1795  spin_lock_irqsave(ms->host->host_lock, flags);
1796  mesh_start(ms);
1797  spin_unlock_irqrestore(ms->host->host_lock, flags);
1798  enable_irq(ms->meshintr);
1799  scsi_unblock_requests(ms->host);
1800 
1801  return 0;
1802 }
1803 
1804 #endif /* CONFIG_PM */
1805 
1806 /*
1807  * If we leave drives set for synchronous transfers (especially
1808  * CDROMs), and reboot to MacOS, it gets confused, poor thing.
1809  * So, on reboot we reset the SCSI bus.
1810  */
1811 static int mesh_shutdown(struct macio_dev *mdev)
1812 {
1813  struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1814  volatile struct mesh_regs __iomem *mr;
1815  unsigned long flags;
1816 
1817  printk(KERN_INFO "resetting MESH scsi bus(es)\n");
1818  spin_lock_irqsave(ms->host->host_lock, flags);
1819  mr = ms->mesh;
1820  out_8(&mr->intr_mask, 0);
1821  out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1822  out_8(&mr->bus_status1, BS1_RST);
1823  mesh_flush_io(mr);
1824  udelay(30);
1825  out_8(&mr->bus_status1, 0);
1826  spin_unlock_irqrestore(ms->host->host_lock, flags);
1827 
1828  return 0;
1829 }
1830 
1831 static struct scsi_host_template mesh_template = {
1832  .proc_name = "mesh",
1833  .name = "MESH",
1834  .queuecommand = mesh_queue,
1835  .eh_abort_handler = mesh_abort,
1836  .eh_host_reset_handler = mesh_host_reset,
1837  .can_queue = 20,
1838  .this_id = 7,
1839  .sg_tablesize = SG_ALL,
1840  .cmd_per_lun = 2,
1841  .use_clustering = DISABLE_CLUSTERING,
1842 };
1843 
1844 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1845 {
1846  struct device_node *mesh = macio_get_of_node(mdev);
1847  struct pci_dev* pdev = macio_get_pci_dev(mdev);
1848  int tgt, minper;
1849  const int *cfp;
1850  struct mesh_state *ms;
1851  struct Scsi_Host *mesh_host;
1852  void *dma_cmd_space;
1853  dma_addr_t dma_cmd_bus;
1854 
1855  switch (mdev->bus->chip->type) {
1856  case macio_heathrow:
1857  case macio_gatwick:
1858  case macio_paddington:
1859  use_active_neg = 0;
1860  break;
1861  default:
1862  use_active_neg = SEQ_ACTIVE_NEG;
1863  }
1864 
1865  if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
1866  printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
1867  " (got %d,%d)\n", macio_resource_count(mdev),
1868  macio_irq_count(mdev));
1869  return -ENODEV;
1870  }
1871 
1872  if (macio_request_resources(mdev, "mesh") != 0) {
1873  printk(KERN_ERR "mesh: unable to request memory resources");
1874  return -EBUSY;
1875  }
1876  mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
1877  if (mesh_host == NULL) {
1878  printk(KERN_ERR "mesh: couldn't register host");
1879  goto out_release;
1880  }
1881 
1882  /* Old junk for root discovery, that will die ultimately */
1883 #if !defined(MODULE)
1884  note_scsi_host(mesh, mesh_host);
1885 #endif
1886 
1887  mesh_host->base = macio_resource_start(mdev, 0);
1888  mesh_host->irq = macio_irq(mdev, 0);
1889  ms = (struct mesh_state *) mesh_host->hostdata;
1890  macio_set_drvdata(mdev, ms);
1891  ms->host = mesh_host;
1892  ms->mdev = mdev;
1893  ms->pdev = pdev;
1894 
1895  ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
1896  if (ms->mesh == NULL) {
1897  printk(KERN_ERR "mesh: can't map registers\n");
1898  goto out_free;
1899  }
1900  ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
1901  if (ms->dma == NULL) {
1902  printk(KERN_ERR "mesh: can't map registers\n");
1903  iounmap(ms->mesh);
1904  goto out_free;
1905  }
1906 
1907  ms->meshintr = macio_irq(mdev, 0);
1908  ms->dmaintr = macio_irq(mdev, 1);
1909 
1910  /* Space for dma command list: +1 for stop command,
1911  * +1 to allow for aligning.
1912  */
1913  ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
1914 
1915  /* We use the PCI APIs for now until the generic one gets fixed
1916  * enough or until we get some macio-specific versions
1917  */
1918  dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev),
1919  ms->dma_cmd_size,
1920  &dma_cmd_bus);
1921  if (dma_cmd_space == NULL) {
1922  printk(KERN_ERR "mesh: can't allocate DMA table\n");
1923  goto out_unmap;
1924  }
1925  memset(dma_cmd_space, 0, ms->dma_cmd_size);
1926 
1927  ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
1928  ms->dma_cmd_space = dma_cmd_space;
1929  ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
1930  - (unsigned long)dma_cmd_space;
1931  ms->current_req = NULL;
1932  for (tgt = 0; tgt < 8; ++tgt) {
1933  ms->tgts[tgt].sdtr_state = do_sdtr;
1934  ms->tgts[tgt].sync_params = ASYNC_PARAMS;
1935  ms->tgts[tgt].current_req = NULL;
1936  }
1937 
1938  if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
1939  ms->clk_freq = *cfp;
1940  else {
1941  printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
1942  ms->clk_freq = 50000000;
1943  }
1944 
1945  /* The maximum sync rate is clock / 5; increase
1946  * mesh_sync_period if necessary.
1947  */
1948  minper = 1000000000 / (ms->clk_freq / 5); /* ns */
1949  if (mesh_sync_period < minper)
1950  mesh_sync_period = minper;
1951 
1952  /* Power up the chip */
1953  set_mesh_power(ms, 1);
1954 
1955  /* Set it up */
1956  mesh_init(ms);
1957 
1958  /* Request interrupt */
1959  if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1960  printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1961  goto out_shutdown;
1962  }
1963 
1964  /* Add scsi host & scan */
1965  if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
1966  goto out_release_irq;
1967  scsi_scan_host(mesh_host);
1968 
1969  return 0;
1970 
1971  out_release_irq:
1972  free_irq(ms->meshintr, ms);
1973  out_shutdown:
1974  /* shutdown & reset bus in case of error or macos can be confused
1975  * at reboot if the bus was set to synchronous mode already
1976  */
1977  mesh_shutdown(mdev);
1978  set_mesh_power(ms, 0);
1979  pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1980  ms->dma_cmd_space, ms->dma_cmd_bus);
1981  out_unmap:
1982  iounmap(ms->dma);
1983  iounmap(ms->mesh);
1984  out_free:
1985  scsi_host_put(mesh_host);
1986  out_release:
1987  macio_release_resources(mdev);
1988 
1989  return -ENODEV;
1990 }
1991 
1992 static int mesh_remove(struct macio_dev *mdev)
1993 {
1994  struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1995  struct Scsi_Host *mesh_host = ms->host;
1996 
1997  scsi_remove_host(mesh_host);
1998 
1999  free_irq(ms->meshintr, ms);
2000 
2001  /* Reset scsi bus */
2002  mesh_shutdown(mdev);
2003 
2004  /* Shut down chip & termination */
2005  set_mesh_power(ms, 0);
2006 
2007  /* Unmap registers & dma controller */
2008  iounmap(ms->mesh);
2009  iounmap(ms->dma);
2010 
2011  /* Free DMA commands memory */
2012  pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2013  ms->dma_cmd_space, ms->dma_cmd_bus);
2014 
2015  /* Release memory resources */
2016  macio_release_resources(mdev);
2017 
2018  scsi_host_put(mesh_host);
2019 
2020  return 0;
2021 }
2022 
2023 
2024 static struct of_device_id mesh_match[] =
2025 {
2026  {
2027  .name = "mesh",
2028  },
2029  {
2030  .type = "scsi",
2031  .compatible = "chrp,mesh0"
2032  },
2033  {},
2034 };
2035 MODULE_DEVICE_TABLE (of, mesh_match);
2036 
2037 static struct macio_driver mesh_driver =
2038 {
2039  .driver = {
2040  .name = "mesh",
2041  .owner = THIS_MODULE,
2042  .of_match_table = mesh_match,
2043  },
2044  .probe = mesh_probe,
2045  .remove = mesh_remove,
2046  .shutdown = mesh_shutdown,
2047 #ifdef CONFIG_PM
2048  .suspend = mesh_suspend,
2049  .resume = mesh_resume,
2050 #endif
2051 };
2052 
2053 
2054 static int __init init_mesh(void)
2055 {
2056 
2057  /* Calculate sync rate from module parameters */
2058  if (sync_rate > 10)
2059  sync_rate = 10;
2060  if (sync_rate > 0) {
2061  printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
2062  mesh_sync_period = 1000 / sync_rate; /* ns */
2063  mesh_sync_offset = 15;
2064  } else
2065  printk(KERN_INFO "mesh: configured for asynchronous\n");
2066 
2067  return macio_register_driver(&mesh_driver);
2068 }
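/*
 * Worked example (not from the original driver): sync_rate=5 gives
 *
 *	mesh_sync_period = 1000 / 5 = 200 (ns), mesh_sync_offset = 15
 *
 * which on the usual 50MHz MESH clock negotiates targets to 5.0 MB/s,
 * while sync_rate=0 leaves mesh_sync_offset at 0 so every target stays
 * asynchronous.
 */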
2069 
2070 static void __exit exit_mesh(void)
2071 {
2072  return macio_unregister_driver(&mesh_driver);
2073 }
2074 
2075 module_init(init_mesh);
2076 module_exit(exit_mesh);