#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/reboot.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <scsi/scsi.h>
#define KERN_DEBUG KERN_WARNING
static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
static int sync_targets = 0xff;
static int resel_targets = 0xff;
static int debug_targets = 0;
static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;

MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous");
MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");

static int mesh_sync_period = 100;
static int mesh_sync_offset = 0;
static unsigned char use_active_neg = 0;
#define ALLOW_SYNC(tgt)		((sync_targets >> (tgt)) & 1)
#define ALLOW_RESEL(tgt)	((resel_targets >> (tgt)) & 1)
#define ALLOW_DEBUG(tgt)	((debug_targets >> (tgt)) & 1)
#define DEBUG_TARGET(cmd)	((cmd) && ALLOW_DEBUG((cmd)->device->id))

#define NUM_DBG_EVENTS		13
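/*
 * sync_targets, resel_targets and debug_targets are per-target bitmasks:
 * bit N controls SCSI target ID N, so the ALLOW_* macros above simply test
 * one bit.  As a hypothetical usage example, loading the module with
 * something like "sync_targets=0xfe" would forbid synchronous negotiation
 * with target 0 while still allowing it for targets 1-7.
 */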
static void mesh_done(struct mesh_state *ms, int start_next);
static inline u32 readtb(void)
{
	u32 tb;

	/* sample the PowerPC timebase so debug-log entries can be timestamped */
	asm ("mftb %0" : "=r" (tb) : );
	return tb;
}
	tlp = &tp->log[tp->log_ix];
	slp = &ms->log[ms->log_ix];
	tlp->bs0 = ms->mesh->bus_status0;
	tlp->bs1 = ms->mesh->bus_status1;
	i = tp->log_ix - tp->n_log;
	} while (i != tp->log_ix);

	i = ms->log_ix - ms->n_log;
	} while (i != ms->log_ix);
static inline void dlog(struct mesh_state *ms, char *fmt, int a)
{}

static inline void dumpslog(struct mesh_state *ms)
{}
#define MKWORD(a, b, c, d)	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
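/*
 * MKWORD packs four byte-wide register snapshots (interrupt, exception,
 * error, FIFO count and the like) into one 32-bit value, so a whole set of
 * MESH registers fits into the single integer argument that dlog() takes.
 */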
	volatile struct dbdma_regs __iomem *md = ms->dma;
	       "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
	for (t = 0; t < 8; ++t) {
	if (init_reset_delay) {
	printk(" use_sg=%d buffer=%p bufflen=%u\n",
	       scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
	panic("mesh: double DMA start !\n");
	ms->tgts[id].saved_ptr = 0;
	dlog(ms, "start cmd=%x", (int) cmd);
	dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
	dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
	for (t = 100; t > 0; --t) {
	dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
	dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
	dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
	for (t = 230; t > 0; --t) {
	dlog(ms, "after arb, intr/exc/err/fc=%.8x",
	dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
	dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
#ifndef MESH_MULTIPLE_HOSTS
	       " to reselection!\n");
	mesh_start_cmd(ms, cmd);
static void mesh_done(struct mesh_state *ms, int start_next)
	if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
	    && cmd->request_buffer != 0) {
		unsigned char *b = cmd->request_buffer;
		       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
	mesh_completed(ms, cmd);
	mesh_flush_io(ms->mesh);
static inline void add_sdtr_msg(struct mesh_state *ms)
	ms->msgout[i+3] = mesh_sync_period/4;
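/*
 * The SDTR message carries the transfer period as a "period factor" in
 * units of 4 ns (per the SCSI-2 spec), which is why mesh_sync_period,
 * kept in nanoseconds, is divided by 4 when copied into msgout[].
 */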
	tr = (ms->clk_freq + 250000) / 500000;
	v = (v + 99999) / 100000 - 2;
	tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
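/*
 * These expressions appear to convert between the MESH bus-clock frequency
 * (ms->clk_freq, in Hz) and the negotiated synchronous period; the
 * "+ 250000", "+ 99999" and "+ 199999" terms are the usual add-before-divide
 * idiom so that each division rounds up rather than down.
 */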
static void start_phase(struct mesh_state *ms)
	volatile struct dbdma_regs __iomem *md = ms->dma;
	dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
	dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
	dlog(ms, "hace: after explicit ATN bus0=%.2x", mr->bus_status0);
	for (i = 0; i < 6; ++i)
	set_dma_cmds(ms, cmd);
	dlog(ms, "enbresel intr/exc/err/fc=%.8x",
static inline void get_msgin(struct mesh_state *ms)
static inline int msgin_length(struct mesh_state *ms)
	} else if (0x20 <= b && b <= 0x2f) {
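/*
 * Message codes 0x20 through 0x2f are defined by the SCSI spec as two-byte
 * messages, so msgin_length() can tell how many bytes are still expected
 * just by looking at the first message byte.
 */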
	static int mesh_aborted_resels;
	mesh_aborted_resels++;
	dlog(ms, "extra resel err/exc/fc = %.6x",
	dlog(ms, "reseldata %x", b);
	for (t = 0; t < 8; ++t)
		if ((b & (1 << t)) != 0 && t != ms->host->this_id)
	if (b != (1 << t) + (1 << ms->host->this_id)) {
	dlog(ms, "resel prev tgt=%d", prev);
	dlog(ms, "abort", 0);
static void handle_reset(struct mesh_state *ms)
	for (tgt = 0; tgt < 8; ++tgt) {
	mesh_completed(ms, cmd);
	mesh_completed(ms, cmd);
	unsigned long flags;
	spin_unlock_irqrestore(dev->host_lock, flags);
static void handle_error(struct mesh_state *ms)
	dlog(ms, "error err/exc/fc/cl=%.8x",
	       "waiting for end...");
	if (exc & EXC_RESELECTED) {
		static int mesh_resel_seqerr;
		mesh_resel_seqerr++;
		static int mesh_phasemm_seqerr;
		mesh_phasemm_seqerr++;
	if (exc & EXC_RESELECTED) {
		static int mesh_resel_exc;
static void handle_msgin(struct mesh_state *ms)
	code = ms->msgin[0];
	dlog(ms, "msgin msg=%.8x",
	if (ms->n_msgin < msgin_length(ms))
	switch (ms->msgin[2]) {
		+ (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
	       "(%d != %d) on reselection from "
	int i, dma_cmd, total, off, dtot;
	struct dbdma_cmd *dcmds;
		OUTPUT_MORE: INPUT_MORE;
	cmd->SCp.this_residual = scsi_bufflen(cmd);
	if (off >= dma_len) {
	if (dma_len > 0xffff)
		panic("mesh: scatterlist element >= 64k");
	st_le16(&dcmds->req_count, dma_len - off);
	st_le16(&dcmds->command, dma_cmd);
	st_le32(&dcmds->phy_addr, dma_addr + off);
	dcmds->xfer_status = 0;
	dtot += dma_len - off;
	static char mesh_extra_buf[64];
	dtot = sizeof(mesh_extra_buf);
	st_le16(&dcmds->req_count, dtot);
	st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf));
	dcmds->xfer_status = 0;
	dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
	st_le16(&dcmds[-1].command, dma_cmd);
	memset(dcmds, 0, sizeof(*dcmds));
	st_le16(&dcmds->command, DBDMA_STOP);
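/*
 * set_dma_cmds() builds the DBDMA descriptor list: each scatterlist segment
 * becomes an OUTPUT_MORE or INPUT_MORE descriptor (req_count is a 16-bit
 * field, hence the panic on segments of 64k or more), the last data
 * descriptor is turned into OUTPUT_LAST/INPUT_LAST by adding the
 * command-code delta, and the list is terminated with a DBDMA_STOP
 * descriptor.  The st_le16/st_le32 helpers store the fields little-endian,
 * as the DBDMA engine expects, even though the CPU is big-endian.
 */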
	volatile struct dbdma_regs __iomem *md = ms->dma;
	dlog(ms, "halt_dma fc/count=%.6x",
	dlog(ms, "data_ptr %x", ms->data_ptr);
	} else if (cmd && scsi_bufflen(cmd) &&
		   ms->data_ptr > scsi_bufflen(cmd)) {
		       "data_ptr=%x total=%x goes_out=%d\n",
static void phase_mismatch(struct mesh_state *ms)
	dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
	       "mesh: no msg to repeat\n");
static void cmd_complete(struct mesh_state *ms)
	dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
	n = msgin_length(ms);
	dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
	switch (ms->phase) {
	dlog(ms, "Selecting phase at command completion", 0);
		(cmd? cmd->device->lun: 0));
	dlog(ms, "impatient for req", ms->n_msgout);
		ms->phase, ms->msgphase);
	dlog(ms, "interrupt intr/err/exc/seq=%.8x",
static int mesh_abort(struct scsi_cmnd *cmd)
	dumplog(ms, cmd->device->id);
static int mesh_host_reset(struct scsi_cmnd *cmd)
	volatile struct dbdma_regs __iomem *md = ms->dma;
	unsigned long flags;
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	if (!machine_is(powermac))
	pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
	pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
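/*
 * PMAC_FTR_MESH_ENABLE asks the PowerMac feature layer to turn the MESH
 * cell on (last argument 1) or off (0); the machine_is(powermac) check
 * above skips the call on machines that do not have this feature interface.
 */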
	unsigned long flags;
	switch (mesg.event) {
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	set_mesh_power(ms, 0);

static int mesh_resume(struct macio_dev *mdev)
	unsigned long flags;
	set_mesh_power(ms, 1);
	spin_unlock_irqrestore(ms->host->host_lock, flags);

static int mesh_shutdown(struct macio_dev *mdev)
	unsigned long flags;
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	.proc_name		= "mesh",
	.queuecommand		= mesh_queue,
	.eh_abort_handler	= mesh_abort,
	.eh_host_reset_handler	= mesh_host_reset,
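/*
 * These scsi_host_template hooks are the entry points the SCSI midlayer
 * uses: queuecommand dispatches new commands to the driver, and the two
 * eh_* handlers are called during error recovery to abort a command or
 * reset the host adapter.
 */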
	struct device_node *mesh = macio_get_of_node(mdev);
	void *dma_cmd_space;
	switch (mdev->bus->chip->type) {
	case macio_heathrow:
	case macio_paddington:
	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
	       " (got %d,%d)\n", macio_resource_count(mdev),
	       macio_irq_count(mdev));
	if (mesh_host == NULL) {
#if !defined(MODULE)
	mesh_host->base = macio_resource_start(mdev, 0);
	mesh_host->irq = macio_irq(mdev, 0);
	ms = (struct mesh_state *) mesh_host->hostdata;
	macio_set_drvdata(mdev, ms);
	ms->host = mesh_host;
	ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
	ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	ms->dmaintr = macio_irq(mdev, 1);
	if (dma_cmd_space == NULL) {
	ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
		- (unsigned long)dma_cmd_space;
	for (tgt = 0; tgt < 8; ++tgt) {
	minper = 1000000000 / (ms->clk_freq / 5);
	if (mesh_sync_period < minper)
		mesh_sync_period = minper;
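/*
 * minper is the shortest synchronous period the controller can sustain,
 * expressed in nanoseconds as five periods of its bus clock.  Assuming the
 * typical 50 MHz MESH clock (clk_freq actually comes from the device tree),
 * that is 1e9 / (50e6 / 5) = 100 ns, i.e. at most 10 MB/s, and any faster
 * requested mesh_sync_period is clamped to that value.
 */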
	set_mesh_power(ms, 1);
	if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
		goto out_release_irq;
	mesh_shutdown(mdev);
	set_mesh_power(ms, 0);
static int mesh_remove(struct macio_dev *mdev)
	mesh_shutdown(mdev);
	set_mesh_power(ms, 0);
	.compatible	= "chrp,mesh0"
static struct macio_driver mesh_driver =
	.of_match_table	= mesh_match,
	.probe		= mesh_probe,
	.remove		= mesh_remove,
	.shutdown	= mesh_shutdown,
	.suspend	= mesh_suspend,
	.resume		= mesh_resume,
static int __init init_mesh(void)
	if (sync_rate > 0) {
		printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
		mesh_sync_period = 1000 / sync_rate;
		mesh_sync_offset = 15;
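/*
 * sync_rate is given in MB/s, so 1000 / sync_rate yields the per-byte
 * period in nanoseconds: for example sync_rate=5 gives a 200 ns period and
 * sync_rate=10 gives 100 ns.  The offset of 15 is the REQ/ACK offset
 * requested in the SDTR negotiation.
 */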
static void __exit exit_mesh(void)