Linux Kernel 3.7.1
sata_sx4.c
1 /*
2  * sata_sx4.c - Promise SATA
3  *
4  * Maintained by: Jeff Garzik <[email protected]>
5  * Please ALWAYS copy [email protected]
6  * on emails.
7  *
8  * Copyright 2003-2004 Red Hat, Inc.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; see the file COPYING. If not, write to
23  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * Hardware documentation available under NDA.
30  *
31  */
32 
33 /*
34  Theory of operation
35  -------------------
36 
37  The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38  engine, DIMM memory, and four ATA engines (one per SATA port).
39  Data is copied to/from DIMM memory by the HDMA engine, before
40  handing off to one (or more) of the ATA engines. The ATA
41  engines operate solely on DIMM memory.
42 
43  The SX4 behaves like a PATA chip, with no SATA controls or
44  knowledge whatsoever, leading to the presumption that
45  PATA<->SATA bridges exist on SX4 boards, external to the
46  PDC20621 chip itself.
47 
48  The chip is quite capable, supporting an XOR engine and linked
49  hardware commands (permits a string of transactions to be
50  submitted and waited-on as a single unit), and an optional
51  microprocessor.
52 
53  The limiting factor is largely software. This Linux driver was
54  written to multiplex the single HDMA engine to copy disk
55  transactions into a fixed DIMM memory space, from where an ATA
56  engine takes over. As a result, each WRITE looks like this:
57 
58  submit HDMA packet to hardware
59  hardware copies data from system memory to DIMM
60  hardware raises interrupt
61 
62  submit ATA packet to hardware
63  hardware executes ATA WRITE command, w/ data in DIMM
64  hardware raises interrupt
65 
66  and each READ looks like this:
67 
68  submit ATA packet to hardware
69  hardware executes ATA READ command, w/ data in DIMM
70  hardware raises interrupt
71 
72  submit HDMA packet to hardware
73  hardware copies data from DIMM to system memory
74  hardware raises interrupt
75 
76  This is a very slow, lock-step way of doing things that can
77  certainly be improved by motivated kernel hackers.
78 
79  */
80 
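/*
 * Editorial summary of how the lock-step scheme above maps onto the code
 * in this file: for a DMA WRITE, pdc20621_qc_prep() builds the S/G tables
 * and both packets in pp->dimm_buf and copies them into the per-port DIMM
 * window; pdc20621_packet_start() then submits the Host-DMA packet
 * (sequence id port+1+4); the first interrupt lands in pdc20621_host_intr()
 * with doing_hdma set, which submits the ATA packet (sequence id port+1);
 * the second interrupt completes the qc.  A DMA READ runs the same two
 * steps in the opposite order (ATA engine first, HDMA copy-out second).
 */
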
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/slab.h>
85 #include <linux/init.h>
86 #include <linux/blkdev.h>
87 #include <linux/delay.h>
88 #include <linux/interrupt.h>
89 #include <linux/device.h>
90 #include <scsi/scsi_host.h>
91 #include <scsi/scsi_cmnd.h>
92 #include <linux/libata.h>
93 #include "sata_promise.h"
94 
95 #define DRV_NAME "sata_sx4"
96 #define DRV_VERSION "0.12"
97 
98 
99 enum {
100  PDC_MMIO_BAR = 3,
101  PDC_DIMM_BAR = 4,
102 
103  PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
104 
105  PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
106  PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
107  PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
108  PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
109 
110  PDC_CTLSTAT = 0x60, /* IDEn control / status */
111 
112  PDC_20621_SEQCTL = 0x400,
113  PDC_20621_SEQMASK = 0x480,
114  PDC_20621_GENERAL_CTL = 0x484,
115  PDC_20621_PAGE_SIZE = (32 * 1024),
116 
117  /* chosen, not constant, values; we design our own DIMM mem map */
118  PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
119  PDC_20621_DIMM_BASE = 0x00200000,
120  PDC_20621_DIMM_DATA = (64 * 1024),
121  PDC_DIMM_DATA_STEP = (256 * 1024),
122  PDC_DIMM_WINDOW_STEP = (8 * 1024),
123  PDC_DIMM_HOST_PRD = (6 * 1024),
124  PDC_DIMM_HOST_PKT = (128 * 0),
125  PDC_DIMM_HPKT_PRD = (128 * 1),
126  PDC_DIMM_ATA_PKT = (128 * 2),
127  PDC_DIMM_APKT_PRD = (128 * 3),
127  PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 
129  PDC_PAGE_WINDOW = 0x40,
130  PDC_PAGE_DATA = PDC_PAGE_WINDOW +
131  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
132  PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
133 
134  PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
135 
136  PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
137  (1<<23),
138 
139  board_20621 = 0, /* FastTrak S150 SX4 */
140 
141  PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
142  PDC_RESET = (1 << 11), /* HDMA/ATA reset */
143  PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
144 
147 
155  PDC_I2C_WRITE = 0, /* master -> slave */
156  PDC_I2C_READ = (1 << 6), /* master <- slave */
157  PDC_I2C_START = (1 << 7), /* start I2C proto */
158  PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
159  PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
160  PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
182  PCI_PLL_INIT = 0x8A531824,
183  PCI_X_TCOUNT = 0xEE1E5CFF,
184 
185  /* PDC_TIME_CONTROL bits */
186  PDC_TIMER_BUZZER = (1 << 10),
187  PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
188  PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
189  PDC_TIMER_ENABLE = (1 << 7),
190  PDC_TIMER_MASK_INT = (1 << 5),
191  PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
192  PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
193  PDC_TIMER_ENABLE |
194  PDC_TIMER_MASK_INT,
195 };
196 
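/*
 * Editorial note on the DIMM map chosen above: each port gets an 8 KB
 * control block at PDC_DIMM_WINDOW_STEP intervals from the DIMM base
 * (host pkt at +0, host-pkt PRD at +128, ATA pkt at +256, ATA-pkt PRD
 * at +384, host PRD table at +6 KB), plus a data buffer at
 * PDC_DIMM_DATA_STEP intervals starting PDC_20621_DIMM_DATA in.  A
 * minimal sketch of the same arithmetic follows; the helper names are
 * ours, the driver itself open-codes these sums below.
 */
#if 0	/* illustrative sketch, not built */
static u32 sx4_port_ctrl_base(unsigned int portno)
{
	/* e.g. port 2: 0x00200000 + 2 * 0x2000 = 0x00204000 */
	return PDC_20621_DIMM_BASE + portno * PDC_DIMM_WINDOW_STEP;
}

static u32 sx4_port_data_base(unsigned int portno)
{
	/* e.g. port 2: 0x00200000 + 0x10000 + 2 * 0x40000 = 0x00290000 */
	return PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       portno * PDC_DIMM_DATA_STEP;
}
#endif
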
197 #define ECC_ERASE_BUF_SZ (128 * 1024)
198 
199 struct pdc_port_priv {
200  u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
201  u8 *pkt;
202  dma_addr_t pkt_dma;
203 };
204 
205 struct pdc_host_priv {
206  unsigned int doing_hdma;
207  unsigned int hdma_prod;
208  unsigned int hdma_cons;
209  struct {
210  struct ata_queued_cmd *qc;
211  unsigned int seq;
212  unsigned long pkt_ofs;
213  } hdma[32];
214 };
215 
216 
217 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
218 static void pdc_error_handler(struct ata_port *ap);
219 static void pdc_freeze(struct ata_port *ap);
220 static void pdc_thaw(struct ata_port *ap);
221 static int pdc_port_start(struct ata_port *ap);
222 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
223 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
225 static unsigned int pdc20621_dimm_init(struct ata_host *host);
226 static int pdc20621_detect_dimm(struct ata_host *host);
227 static unsigned int pdc20621_i2c_read(struct ata_host *host,
228  u32 device, u32 subaddr, u32 *pdata);
229 static int pdc20621_prog_dimm0(struct ata_host *host);
230 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
231 #ifdef ATA_VERBOSE_DEBUG
232 static void pdc20621_get_from_dimm(struct ata_host *host,
233  void *psource, u32 offset, u32 size);
234 #endif
235 static void pdc20621_put_to_dimm(struct ata_host *host,
236  void *psource, u32 offset, u32 size);
237 static void pdc20621_irq_clear(struct ata_port *ap);
238 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
239 static int pdc_softreset(struct ata_link *link, unsigned int *class,
240  unsigned long deadline);
241 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
242 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
243 
244 
245 static struct scsi_host_template pdc_sata_sht = {
246  ATA_BASE_SHT(DRV_NAME),
247  .sg_tablesize = LIBATA_MAX_PRD,
248  .dma_boundary = ATA_DMA_BOUNDARY,
249 };
250 
251 /* TODO: inherit from base port_ops after converting to new EH */
252 static struct ata_port_operations pdc_20621_ops = {
253  .inherits = &ata_sff_port_ops,
254 
255  .check_atapi_dma = pdc_check_atapi_dma,
256  .qc_prep = pdc20621_qc_prep,
257  .qc_issue = pdc20621_qc_issue,
258 
259  .freeze = pdc_freeze,
260  .thaw = pdc_thaw,
261  .softreset = pdc_softreset,
262  .error_handler = pdc_error_handler,
263  .lost_interrupt = ATA_OP_NULL,
264  .post_internal_cmd = pdc_post_internal_cmd,
265 
266  .port_start = pdc_port_start,
267 
268  .sff_tf_load = pdc_tf_load_mmio,
269  .sff_exec_command = pdc_exec_command_mmio,
270  .sff_irq_clear = pdc20621_irq_clear,
271 };
272 
273 static const struct ata_port_info pdc_port_info[] = {
274  /* board_20621 */
275  {
276  .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
277  ATA_FLAG_PIO_POLLING,
278  .pio_mask = ATA_PIO4,
279  .mwdma_mask = ATA_MWDMA2,
280  .udma_mask = ATA_UDMA6,
281  .port_ops = &pdc_20621_ops,
282  },
283 
284 };
285 
286 static const struct pci_device_id pdc_sata_pci_tbl[] = {
287  { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
288 
289  { } /* terminate list */
290 };
291 
292 static struct pci_driver pdc_sata_pci_driver = {
293  .name = DRV_NAME,
294  .id_table = pdc_sata_pci_tbl,
295  .probe = pdc_sata_init_one,
296  .remove = ata_pci_remove_one,
297 };
298 
299 
300 static int pdc_port_start(struct ata_port *ap)
301 {
302  struct device *dev = ap->host->dev;
303  struct pdc_port_priv *pp;
304 
305  pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
306  if (!pp)
307  return -ENOMEM;
308 
309  pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310  if (!pp->pkt)
311  return -ENOMEM;
312 
313  ap->private_data = pp;
314 
315  return 0;
316 }
317 
318 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
319  unsigned int portno,
320  unsigned int total_len)
321 {
322  u32 addr;
323  unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
324  __le32 *buf32 = (__le32 *) buf;
325 
326  /* output ATA packet S/G table */
327  addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
328  (PDC_DIMM_DATA_STEP * portno);
329  VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
330  buf32[dw] = cpu_to_le32(addr);
331  buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
332 
333  VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
334  PDC_20621_DIMM_BASE +
335  (PDC_DIMM_WINDOW_STEP * portno) +
336  PDC_DIMM_APKT_PRD,
337  buf32[dw], buf32[dw + 1]);
338 }
339 
340 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
341  unsigned int portno,
342  unsigned int total_len)
343 {
344  u32 addr;
345  unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
346  __le32 *buf32 = (__le32 *) buf;
347 
348  /* output Host DMA packet S/G table */
349  addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
350  (PDC_DIMM_DATA_STEP * portno);
351 
352  buf32[dw] = cpu_to_le32(addr);
353  buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
354 
355  VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
356  PDC_20621_DIMM_BASE +
357  (PDC_DIMM_WINDOW_STEP * portno) +
358  PDC_DIMM_HPKT_PRD,
359  buf32[dw], buf32[dw + 1]);
360 }
361 
362 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
363  unsigned int devno, u8 *buf,
364  unsigned int portno)
365 {
366  unsigned int i, dw;
367  __le32 *buf32 = (__le32 *) buf;
368  u8 dev_reg;
369 
370  unsigned int dimm_sg = PDC_20621_DIMM_BASE +
371  (PDC_DIMM_WINDOW_STEP * portno) +
372  PDC_DIMM_APKT_PRD;
373  VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
374 
375  i = PDC_DIMM_ATA_PKT;
376 
377  /*
378  * Set up ATA packet
379  */
380  if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
381  buf[i++] = PDC_PKT_READ;
382  else if (tf->protocol == ATA_PROT_NODATA)
383  buf[i++] = PDC_PKT_NODATA;
384  else
385  buf[i++] = 0;
386  buf[i++] = 0; /* reserved */
387  buf[i++] = portno + 1; /* seq. id */
388  buf[i++] = 0xff; /* delay seq. id */
389 
390  /* dimm dma S/G, and next-pkt */
391  dw = i >> 2;
392  if (tf->protocol == ATA_PROT_NODATA)
393  buf32[dw] = 0;
394  else
395  buf32[dw] = cpu_to_le32(dimm_sg);
396  buf32[dw + 1] = 0;
397  i += 8;
398 
399  if (devno == 0)
400  dev_reg = ATA_DEVICE_OBS;
401  else
402  dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
403 
404  /* select device */
405  buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
406  buf[i++] = dev_reg;
407 
408  /* device control register */
409  buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
410  buf[i++] = tf->ctl;
411 
412  return i;
413 }
414 
415 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
416  unsigned int portno)
417 {
418  unsigned int dw;
419  u32 tmp;
420  __le32 *buf32 = (__le32 *) buf;
421 
422  unsigned int host_sg = PDC_20621_DIMM_BASE +
423  (PDC_DIMM_WINDOW_STEP * portno) +
424  PDC_DIMM_HOST_PRD;
425  unsigned int dimm_sg = PDC_20621_DIMM_BASE +
426  (PDC_DIMM_WINDOW_STEP * portno) +
427  PDC_DIMM_HPKT_PRD;
428  VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
429  VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
430 
431  dw = PDC_DIMM_HOST_PKT >> 2;
432 
433  /*
434  * Set up Host DMA packet
435  */
436  if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
437  tmp = PDC_PKT_READ;
438  else
439  tmp = 0;
440  tmp |= ((portno + 1 + 4) << 16); /* seq. id */
441  tmp |= (0xff << 24); /* delay seq. id */
442  buf32[dw + 0] = cpu_to_le32(tmp);
443  buf32[dw + 1] = cpu_to_le32(host_sg);
444  buf32[dw + 2] = cpu_to_le32(dimm_sg);
445  buf32[dw + 3] = 0;
446 
447  VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
448  PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
449  PDC_DIMM_HOST_PKT,
450  buf32[dw + 0],
451  buf32[dw + 1],
452  buf32[dw + 2],
453  buf32[dw + 3]);
454 }
455 
456 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
457 {
458  struct scatterlist *sg;
459  struct ata_port *ap = qc->ap;
460  struct pdc_port_priv *pp = ap->private_data;
461  void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
462  void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
463  unsigned int portno = ap->port_no;
464  unsigned int i, si, idx, total_len = 0, sgt_len;
465  __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
466 
467  WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
468 
469  VPRINTK("ata%u: ENTER\n", ap->print_id);
470 
471  /* hard-code chip #0 */
472  mmio += PDC_CHIP0_OFS;
473 
474  /*
475  * Build S/G table
476  */
477  idx = 0;
478  for_each_sg(qc->sg, sg, qc->n_elem, si) {
479  buf[idx++] = cpu_to_le32(sg_dma_address(sg));
480  buf[idx++] = cpu_to_le32(sg_dma_len(sg));
481  total_len += sg_dma_len(sg);
482  }
483  buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
484  sgt_len = idx * 4;
485 
486  /*
487  * Build ATA, host DMA packets
488  */
489  pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
490  pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
491 
492  pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
493  i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
494 
495  if (qc->tf.flags & ATA_TFLAG_LBA48)
496  i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
497  else
498  i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
499 
500  pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
501 
502  /* copy three S/G tables and two packets to DIMM MMIO window */
503  memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
504  &pp->dimm_buf, PDC_DIMM_HEADER_SZ + sgt_len);
505  memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
506  PDC_DIMM_HOST_PRD,
507  &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
508 
509  /* force host FIFO dump */
510  writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
511 
512  readl(dimm_mmio); /* MMIO PCI posting flush */
513 
514  VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
515 }
516 
517 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
518 {
519  struct ata_port *ap = qc->ap;
520  struct pdc_port_priv *pp = ap->private_data;
521  void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
522  void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
523  unsigned int portno = ap->port_no;
524  unsigned int i;
525 
526  VPRINTK("ata%u: ENTER\n", ap->print_id);
527 
528  /* hard-code chip #0 */
529  mmio += PDC_CHIP0_OFS;
530 
531  i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
532 
533  if (qc->tf.flags & ATA_TFLAG_LBA48)
534  i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
535  else
536  i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
537 
538  pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
539 
540  /* copy three S/G tables and two packets to DIMM MMIO window */
541  memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
542  &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
543 
544  /* force host FIFO dump */
545  writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
546 
547  readl(dimm_mmio); /* MMIO PCI posting flush */
548 
549  VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
550 }
551 
552 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
553 {
554  switch (qc->tf.protocol) {
555  case ATA_PROT_DMA:
556  pdc20621_dma_prep(qc);
557  break;
558  case ATA_PROT_NODATA:
559  pdc20621_nodata_prep(qc);
560  break;
561  default:
562  break;
563  }
564 }
565 
566 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
567  unsigned int seq,
568  u32 pkt_ofs)
569 {
570  struct ata_port *ap = qc->ap;
571  struct ata_host *host = ap->host;
572  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
573 
574  /* hard-code chip #0 */
575  mmio += PDC_CHIP0_OFS;
576 
577  writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
578  readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
579 
580  writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
581  readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
582 }
583 
584 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
585  unsigned int seq,
586  u32 pkt_ofs)
587 {
588  struct ata_port *ap = qc->ap;
589  struct pdc_host_priv *pp = ap->host->private_data;
590  unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
591 
592  if (!pp->doing_hdma) {
593  __pdc20621_push_hdma(qc, seq, pkt_ofs);
594  pp->doing_hdma = 1;
595  return;
596  }
597 
598  pp->hdma[idx].qc = qc;
599  pp->hdma[idx].seq = seq;
600  pp->hdma[idx].pkt_ofs = pkt_ofs;
601  pp->hdma_prod++;
602 }
603 
604 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
605 {
606  struct ata_port *ap = qc->ap;
607  struct pdc_host_priv *pp = ap->host->private_data;
608  unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
609 
610  /* if nothing on queue, we're done */
611  if (pp->hdma_prod == pp->hdma_cons) {
612  pp->doing_hdma = 0;
613  return;
614  }
615 
616  __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
617  pp->hdma[idx].pkt_ofs);
618  pp->hdma_cons++;
619 }
620 
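/*
 * The push/pop pair above multiplexes the single HDMA engine through a
 * small single-producer/single-consumer ring protected by the host lock.
 * Below is a self-contained sketch of the same index arithmetic, assuming
 * PDC_HDMA_Q_MASK is the queue depth minus one (31 for the 32-entry
 * hdma[] array); the type and function names here are ours.
 */
#if 0	/* illustrative sketch, not built */
struct tiny_ring {
	unsigned int prod, cons;	/* free-running counters */
	unsigned long slot[32];
};

static bool tiny_ring_push(struct tiny_ring *r, unsigned long v)
{
	if (r->prod - r->cons >= 32)
		return false;		/* ring full */
	r->slot[r->prod++ & 31] = v;	/* mask picks the slot */
	return true;
}

static bool tiny_ring_pop(struct tiny_ring *r, unsigned long *v)
{
	if (r->prod == r->cons)
		return false;		/* ring empty */
	*v = r->slot[r->cons++ & 31];
	return true;
}
#endif
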
621 #ifdef ATA_VERBOSE_DEBUG
622 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
623 {
624  struct ata_port *ap = qc->ap;
625  unsigned int port_no = ap->port_no;
626  void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
627 
628  dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
629  dimm_mmio += PDC_DIMM_HOST_PKT;
630 
631  printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
632  printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
633  printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
634  printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
635 }
636 #else
637 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
638 #endif /* ATA_VERBOSE_DEBUG */
639 
640 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
641 {
642  struct ata_port *ap = qc->ap;
643  struct ata_host *host = ap->host;
644  unsigned int port_no = ap->port_no;
645  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
646  unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
647  u8 seq = (u8) (port_no + 1);
648  unsigned int port_ofs;
649 
650  /* hard-code chip #0 */
651  mmio += PDC_CHIP0_OFS;
652 
653  VPRINTK("ata%u: ENTER\n", ap->print_id);
654 
655  wmb(); /* flush PRD, pkt writes */
656 
657  port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
658 
659  /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
660  if (rw && qc->tf.protocol == ATA_PROT_DMA) {
661  seq += 4;
662 
663  pdc20621_dump_hdma(qc);
664  pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
665  VPRINTK("queued ofs 0x%x (%u), seq %u\n",
666  port_ofs + PDC_DIMM_HOST_PKT,
667  port_ofs + PDC_DIMM_HOST_PKT,
668  seq);
669  } else {
670  writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
671  readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
672 
673  writel(port_ofs + PDC_DIMM_ATA_PKT,
674  ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
675  readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
676  VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
677  port_ofs + PDC_DIMM_ATA_PKT,
678  port_ofs + PDC_DIMM_ATA_PKT,
679  seq);
680  }
681 }
682 
683 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
684 {
685  switch (qc->tf.protocol) {
686  case ATA_PROT_NODATA:
687  if (qc->tf.flags & ATA_TFLAG_POLLING)
688  break;
689  /*FALLTHROUGH*/
690  case ATA_PROT_DMA:
691  pdc20621_packet_start(qc);
692  return 0;
693 
694  case ATAPI_PROT_DMA:
695  BUG();
696  break;
697 
698  default:
699  break;
700  }
701 
702  return ata_sff_qc_issue(qc);
703 }
704 
705 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
706  struct ata_queued_cmd *qc,
707  unsigned int doing_hdma,
708  void __iomem *mmio)
709 {
710  unsigned int port_no = ap->port_no;
711  unsigned int port_ofs =
712  PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
713  u8 status;
714  unsigned int handled = 0;
715 
716  VPRINTK("ENTER\n");
717 
718  if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
719  (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
720 
721  /* step two - DMA from DIMM to host */
722  if (doing_hdma) {
723  VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
724  readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
725  /* get drive status; clear intr; complete txn */
726  qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
727  ata_qc_complete(qc);
728  pdc20621_pop_hdma(qc);
729  }
730 
731  /* step one - exec ATA command */
732  else {
733  u8 seq = (u8) (port_no + 1 + 4);
734  VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
735  readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
736 
737  /* submit hdma pkt */
738  pdc20621_dump_hdma(qc);
739  pdc20621_push_hdma(qc, seq,
740  port_ofs + PDC_DIMM_HOST_PKT);
741  }
742  handled = 1;
743 
744  } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
745 
746  /* step one - DMA from host to DIMM */
747  if (doing_hdma) {
748  u8 seq = (u8) (port_no + 1);
749  VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
750  readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
751 
752  /* submit ata pkt */
753  writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
754  readl(mmio + PDC_20621_SEQCTL + (seq * 4));
755  writel(port_ofs + PDC_DIMM_ATA_PKT,
756  ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
757  readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
758  }
759 
760  /* step two - execute ATA command */
761  else {
762  VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
763  readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
764  /* get drive status; clear intr; complete txn */
765  qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
766  ata_qc_complete(qc);
767  pdc20621_pop_hdma(qc);
768  }
769  handled = 1;
770 
771  /* command completion, but no data xfer */
772  } else if (qc->tf.protocol == ATA_PROT_NODATA) {
773 
774  status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
775  DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
776  qc->err_mask |= ac_err_mask(status);
777  ata_qc_complete(qc);
778  handled = 1;
779 
780  } else {
781  ap->stats.idle_irq++;
782  }
783 
784  return handled;
785 }
786 
787 static void pdc20621_irq_clear(struct ata_port *ap)
788 {
789  ioread8(ap->ioaddr.status_addr);
790 }
791 
792 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
793 {
794  struct ata_host *host = dev_instance;
795  struct ata_port *ap;
796  u32 mask = 0;
797  unsigned int i, tmp, port_no;
798  unsigned int handled = 0;
799  void __iomem *mmio_base;
800 
801  VPRINTK("ENTER\n");
802 
803  if (!host || !host->iomap[PDC_MMIO_BAR]) {
804  VPRINTK("QUICK EXIT\n");
805  return IRQ_NONE;
806  }
807 
808  mmio_base = host->iomap[PDC_MMIO_BAR];
809 
810  /* reading should also clear interrupts */
811  mmio_base += PDC_CHIP0_OFS;
812  mask = readl(mmio_base + PDC_20621_SEQMASK);
813  VPRINTK("mask == 0x%x\n", mask);
814 
815  if (mask == 0xffffffff) {
816  VPRINTK("QUICK EXIT 2\n");
817  return IRQ_NONE;
818  }
819  mask &= 0xffff; /* only 16 tags possible */
820  if (!mask) {
821  VPRINTK("QUICK EXIT 3\n");
822  return IRQ_NONE;
823  }
824 
825  spin_lock(&host->lock);
826 
827  for (i = 1; i < 9; i++) {
828  port_no = i - 1;
829  if (port_no > 3)
830  port_no -= 4;
831  if (port_no >= host->n_ports)
832  ap = NULL;
833  else
834  ap = host->ports[port_no];
835  tmp = mask & (1 << i);
836  VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
837  if (tmp && ap) {
838  struct ata_queued_cmd *qc;
839 
840  qc = ata_qc_from_tag(ap, ap->link.active_tag);
841  if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
842  handled += pdc20621_host_intr(ap, qc, (i > 4),
843  mmio_base);
844  }
845  }
846 
847  spin_unlock(&host->lock);
848 
849  VPRINTK("mask == 0x%x\n", mask);
850 
851  VPRINTK("EXIT\n");
852 
853  return IRQ_RETVAL(handled);
854 }
855 
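/*
 * Worked example of the SEQ-mask decode in pdc20621_interrupt() above:
 * ATA engines signal with sequence ids 1..4 (port + 1, assigned in
 * pdc20621_packet_start()) and the HDMA copies for the same ports signal
 * with ids 5..8 (port + 1 + 4).  A mask of 0x0020 therefore means bit 5
 * is set: i == 5, port_no == 0, and (i > 4) tells pdc20621_host_intr()
 * this is an HDMA completion for port 0 rather than an ATA-engine one.
 */
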
856 static void pdc_freeze(struct ata_port *ap)
857 {
858  void __iomem *mmio = ap->ioaddr.cmd_addr;
859  u32 tmp;
860 
861  /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
862 
863  tmp = readl(mmio + PDC_CTLSTAT);
864  tmp |= PDC_MASK_INT;
865  tmp &= ~PDC_DMA_ENABLE;
866  writel(tmp, mmio + PDC_CTLSTAT);
867  readl(mmio + PDC_CTLSTAT); /* flush */
868 }
869 
870 static void pdc_thaw(struct ata_port *ap)
871 {
872  void __iomem *mmio = ap->ioaddr.cmd_addr;
873  u32 tmp;
874 
875  /* FIXME: start HDMA engine, if zero ATA engines running */
876 
877  /* clear IRQ */
878  ioread8(ap->ioaddr.status_addr);
879 
880  /* turn IRQ back on */
881  tmp = readl(mmio + PDC_CTLSTAT);
882  tmp &= ~PDC_MASK_INT;
883  writel(tmp, mmio + PDC_CTLSTAT);
884  readl(mmio + PDC_CTLSTAT); /* flush */
885 }
886 
887 static void pdc_reset_port(struct ata_port *ap)
888 {
889  void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
890  unsigned int i;
891  u32 tmp;
892 
893  /* FIXME: handle HDMA copy engine */
894 
895  for (i = 11; i > 0; i--) {
896  tmp = readl(mmio);
897  if (tmp & PDC_RESET)
898  break;
899 
900  udelay(100);
901 
902  tmp |= PDC_RESET;
903  writel(tmp, mmio);
904  }
905 
906  tmp &= ~PDC_RESET;
907  writel(tmp, mmio);
908  readl(mmio); /* flush */
909 }
910 
911 static int pdc_softreset(struct ata_link *link, unsigned int *class,
912  unsigned long deadline)
913 {
914  pdc_reset_port(link->ap);
915  return ata_sff_softreset(link, class, deadline);
916 }
917 
918 static void pdc_error_handler(struct ata_port *ap)
919 {
920  if (!(ap->pflags & ATA_PFLAG_FROZEN))
921  pdc_reset_port(ap);
922 
923  ata_sff_error_handler(ap);
924 }
925 
926 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
927 {
928  struct ata_port *ap = qc->ap;
929 
930  /* make DMA engine forget about the failed command */
931  if (qc->flags & ATA_QCFLAG_FAILED)
932  pdc_reset_port(ap);
933 }
934 
935 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
936 {
937  u8 *scsicmd = qc->scsicmd->cmnd;
938  int pio = 1; /* atapi dma off by default */
939 
940  /* Whitelist commands that may use DMA. */
941  switch (scsicmd[0]) {
942  case WRITE_12:
943  case WRITE_10:
944  case WRITE_6:
945  case READ_12:
946  case READ_10:
947  case READ_6:
948  case 0xad: /* READ_DVD_STRUCTURE */
949  case 0xbe: /* READ_CD */
950  pio = 0;
951  }
952  /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
953  if (scsicmd[0] == WRITE_10) {
954  unsigned int lba =
955  (scsicmd[2] << 24) |
956  (scsicmd[3] << 16) |
957  (scsicmd[4] << 8) |
958  scsicmd[5];
959  if (lba >= 0xFFFF4FA2)
960  pio = 1;
961  }
962  return pio;
963 }
964 
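/*
 * Worked example of the LBA cut-off above (editorial illustration):
 * 0xFFFF4FA2 == 2^32 - 45150, so the DMA whitelist is overridden for
 * WRITE_10 commands addressing the last 45150 sectors of the 32-bit LBA
 * space.  A WRITE_10 whose big-endian LBA field decodes to 0xFFFF8000
 * falls in that range, so pdc_check_atapi_dma() returns 1 and the
 * command is issued via PIO rather than ATAPI DMA.
 */
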
965 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
966 {
967  WARN_ON(tf->protocol == ATA_PROT_DMA ||
968  tf->protocol == ATAPI_PROT_DMA);
969  ata_sff_tf_load(ap, tf);
970 }
971 
972 
973 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
974 {
975  WARN_ON(tf->protocol == ATA_PROT_DMA ||
976  tf->protocol == ATAPI_PROT_DMA);
977  ata_sff_exec_command(ap, tf);
978 }
979 
980 
981 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
982 {
983  port->cmd_addr = base;
984  port->data_addr = base;
985  port->feature_addr =
986  port->error_addr = base + 0x4;
987  port->nsect_addr = base + 0x8;
988  port->lbal_addr = base + 0xc;
989  port->lbam_addr = base + 0x10;
990  port->lbah_addr = base + 0x14;
991  port->device_addr = base + 0x18;
992  port->command_addr =
993  port->status_addr = base + 0x1c;
994  port->altstatus_addr =
995  port->ctl_addr = base + 0x38;
996 }
997 
998 
999 #ifdef ATA_VERBOSE_DEBUG
1000 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
1001  u32 offset, u32 size)
1002 {
1003  u32 window_size;
1004  u16 idx;
1005  u8 page_mask;
1006  long dist;
1007  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1008  void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1009 
1010  /* hard-code chip #0 */
1011  mmio += PDC_CHIP0_OFS;
1012 
1013  page_mask = 0x00;
1014  window_size = 0x2000 * 4; /* 32K byte uchar size */
1015  idx = (u16) (offset / window_size);
1016 
1017  writel(0x01, mmio + PDC_GENERAL_CTLR);
1018  readl(mmio + PDC_GENERAL_CTLR);
1019  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1020  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1021 
1022  offset -= (idx * window_size);
1023  idx++;
1024  dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1025  (long) (window_size - offset);
1026  memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
1027  dist);
1028 
1029  psource += dist;
1030  size -= dist;
1031  for (; (long) size >= (long) window_size ;) {
1032  writel(0x01, mmio + PDC_GENERAL_CTLR);
1033  readl(mmio + PDC_GENERAL_CTLR);
1034  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1035  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1036  memcpy_fromio((char *) psource, (char *) (dimm_mmio),
1037  window_size / 4);
1038  psource += window_size;
1039  size -= window_size;
1040  idx++;
1041  }
1042 
1043  if (size) {
1044  writel(0x01, mmio + PDC_GENERAL_CTLR);
1045  readl(mmio + PDC_GENERAL_CTLR);
1046  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1047  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1048  memcpy_fromio((char *) psource, (char *) (dimm_mmio),
1049  size / 4);
1050  }
1051 }
1052 #endif
1053 
1054 
1055 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1056  u32 offset, u32 size)
1057 {
1058  u32 window_size;
1059  u16 idx;
1060  u8 page_mask;
1061  long dist;
1062  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1063  void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1064 
1065  /* hard-code chip #0 */
1066  mmio += PDC_CHIP0_OFS;
1067 
1068  page_mask = 0x00;
1069  window_size = 0x2000 * 4; /* 32K byte uchar size */
1070  idx = (u16) (offset / window_size);
1071 
1072  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1073  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1074  offset -= (idx * window_size);
1075  idx++;
1076  dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1077  (long) (window_size - offset);
1078  memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1079  writel(0x01, mmio + PDC_GENERAL_CTLR);
1080  readl(mmio + PDC_GENERAL_CTLR);
1081 
1082  psource += dist;
1083  size -= dist;
1084  for (; (long) size >= (long) window_size ;) {
1085  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1086  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1087  memcpy_toio(dimm_mmio, psource, window_size / 4);
1088  writel(0x01, mmio + PDC_GENERAL_CTLR);
1089  readl(mmio + PDC_GENERAL_CTLR);
1090  psource += window_size;
1091  size -= window_size;
1092  idx++;
1093  }
1094 
1095  if (size) {
1096  writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1097  readl(mmio + PDC_DIMM_WINDOW_CTLR);
1098  memcpy_toio(dimm_mmio, psource, size / 4);
1099  writel(0x01, mmio + PDC_GENERAL_CTLR);
1100  readl(mmio + PDC_GENERAL_CTLR);
1101  }
1102 }
1103 
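/*
 * Worked example of the window paging above (editorial note): the DIMM
 * is reached through a single 32 KB window (window_size == 0x2000 * 4),
 * and the page index written to PDC_DIMM_WINDOW_CTLR selects which 32 KB
 * slice of DIMM memory that window shows.  For the 40-byte write to DIMM
 * offset 0x10040 done by the self-test in pdc20621_dimm_init() below:
 * idx = 0x10040 / 0x8000 = 2, so page 2 is selected and the remaining
 * in-window offset is 0x10040 - 2 * 0x8000 = 0x40.  Transfers that cross
 * a 32 KB boundary are split, with the page index advanced per chunk.
 */
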
1104 
1105 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1106  u32 subaddr, u32 *pdata)
1107 {
1108  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1109  u32 i2creg = 0;
1110  u32 status;
1111  u32 count = 0;
1112 
1113  /* hard-code chip #0 */
1114  mmio += PDC_CHIP0_OFS;
1115 
1116  i2creg |= device << 24;
1117  i2creg |= subaddr << 16;
1118 
1119  /* Set the device and subaddress */
1120  writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1121  readl(mmio + PDC_I2C_ADDR_DATA);
1122 
1123  /* Write Control to perform read operation, mask int */
1124  writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1125  mmio + PDC_I2C_CONTROL);
1126 
1127  for (count = 0; count <= 1000; count ++) {
1128  status = readl(mmio + PDC_I2C_CONTROL);
1129  if (status & PDC_I2C_COMPLETE) {
1130  status = readl(mmio + PDC_I2C_ADDR_DATA);
1131  break;
1132  } else if (count == 1000)
1133  return 0;
1134  }
1135 
1136  *pdata = (status >> 8) & 0x000000ff;
1137  return 1;
1138 }
1139 
1140 
1141 static int pdc20621_detect_dimm(struct ata_host *host)
1142 {
1143  u32 data = 0;
1144  if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1145  PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1146  if (data == 100)
1147  return 100;
1148  } else
1149  return 0;
1150 
1151  if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1152  if (data <= 0x75)
1153  return 133;
1154  } else
1155  return 0;
1156 
1157  return 0;
1158 }
1159 
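/*
 * Editorial note on the SPD bytes used above: byte 126 is commonly the
 * Intel PC100/PC133 frequency indicator (decimal 100 marks a PC100
 * module), and byte 9 holds the minimum SDRAM cycle time in nanoseconds
 * with a tenths digit in the low nibble, so a value of 0x75 means
 * 7.5 ns, i.e. a 133 MHz capable part.
 */
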
1160 
1161 static int pdc20621_prog_dimm0(struct ata_host *host)
1162 {
1163  u32 spd0[50];
1164  u32 data = 0;
1165  int size, i;
1166  u8 bdimmsize;
1167  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1168  static const struct {
1169  unsigned int reg;
1170  unsigned int ofs;
1171  } pdc_i2c_read_data [] = {
1172  { PDC_DIMM_SPD_TYPE, 11 },
1173  { PDC_DIMM_SPD_FRESH_RATE, 12 },
1174  { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1175  { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1176  { PDC_DIMM_SPD_ROW_NUM, 3 },
1177  { PDC_DIMM_SPD_BANK_NUM, 17 },
1178  { PDC_DIMM_SPD_MODULE_ROW, 5 },
1183  { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1184  };
1185 
1186  /* hard-code chip #0 */
1187  mmio += PDC_CHIP0_OFS;
1188 
1189  for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1190  pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1191  pdc_i2c_read_data[i].reg,
1192  &spd0[pdc_i2c_read_data[i].ofs]);
1193 
1194  data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1195  data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1196  ((((spd0[27] + 9) / 10) - 1) << 8) ;
1197  data |= (((((spd0[29] > spd0[28])
1198  ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1199  data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1200 
1201  if (spd0[18] & 0x08)
1202  data |= ((0x03) << 14);
1203  else if (spd0[18] & 0x04)
1204  data |= ((0x02) << 14);
1205  else if (spd0[18] & 0x01)
1206  data |= ((0x01) << 14);
1207  else
1208  data |= (0 << 14);
1209 
1210  /*
1211  Calculate the size of bDIMMSize (power of 2) and
1212  merge the DIMM size by program start/end address.
1213  */
1214 
1215  bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1216  size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1217  data |= (((size / 16) - 1) << 16);
1218  data |= (0 << 23);
1219  data |= 8;
1220  writel(data, mmio + PDC_DIMM0_CONTROL);
1221  readl(mmio + PDC_DIMM0_CONTROL);
1222  return size;
1223 }
1224 
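/*
 * Worked example of the size formula in pdc20621_prog_dimm0() above,
 * assuming a typical double-sided 256 MB registered SDRAM module
 * (SPD: 13 row bits, 9 column bits, 2 module rows, 4 internal banks):
 *
 *   bdimmsize = 9 + (2 / 2) + 13 + (4 / 2) + 3 = 28
 *   size      = (1 << 28) >> 20               = 256 (MB)
 *
 * and ((size / 16) - 1) == 15 is the value merged into bits 16+ of the
 * DIMM0 control word.
 */
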
1225 
1226 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1227 {
1228  u32 data, spd0;
1229  int error, i;
1230  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1231 
1232  /* hard-code chip #0 */
1233  mmio += PDC_CHIP0_OFS;
1234 
1235  /*
1236  Set To Default : DIMM Module Global Control Register (0x022259F1)
1237  DIMM Arbitration Disable (bit 20)
1238  DIMM Data/Control Output Driving Selection (bit12 - bit15)
1239  Refresh Enable (bit 17)
1240  */
1241 
1242  data = 0x022259F1;
1243  writel(data, mmio + PDC_SDRAM_CONTROL);
1244  readl(mmio + PDC_SDRAM_CONTROL);
1245 
1246  /* Turn on for ECC */
1247  pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1248  PDC_DIMM_SPD_TYPE, &spd0);
1249  if (spd0 == 0x02) {
1250  data |= (0x01 << 16);
1251  writel(data, mmio + PDC_SDRAM_CONTROL);
1252  readl(mmio + PDC_SDRAM_CONTROL);
1253  printk(KERN_ERR "Local DIMM ECC Enabled\n");
1254  }
1255 
1256  /* DIMM Initialization Select/Enable (bit 18/19) */
1257  data &= (~(1<<18));
1258  data |= (1<<19);
1259  writel(data, mmio + PDC_SDRAM_CONTROL);
1260 
1261  error = 1;
1262  for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1263  data = readl(mmio + PDC_SDRAM_CONTROL);
1264  if (!(data & (1<<19))) {
1265  error = 0;
1266  break;
1267  }
1268  msleep(i*100);
1269  }
1270  return error;
1271 }
1272 
1273 
1274 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1275 {
1276  int speed, size, length;
1277  u32 addr, spd0, pci_status;
1278  u32 time_period = 0;
1279  u32 tcount = 0;
1280  u32 ticks = 0;
1281  u32 clock = 0;
1282  u32 fparam = 0;
1283  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1284 
1285  /* hard-code chip #0 */
1286  mmio += PDC_CHIP0_OFS;
1287 
1288  /* Initialize PLL based upon PCI Bus Frequency */
1289 
1290  /* Initialize Time Period Register */
1291  writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1292  time_period = readl(mmio + PDC_TIME_PERIOD);
1293  VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1294 
1295  /* Enable timer */
1296  writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1297  readl(mmio + PDC_TIME_CONTROL);
1298 
1299  /* Wait 3 seconds */
1300  msleep(3000);
1301 
1302  /*
1303  When timer is enabled, counter is decreased every internal
1304  clock cycle.
1305  */
1306 
1307  tcount = readl(mmio + PDC_TIME_COUNTER);
1308  VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1309 
1310  /*
1311  If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1312  register should be >= (0xffffffff - 3x10^8).
1313  */
1314  if (tcount >= PCI_X_TCOUNT) {
1315  ticks = (time_period - tcount);
1316  VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1317 
1318  clock = (ticks / 300000);
1319  VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1320 
1321  clock = (clock * 33);
1322  VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1323 
1324  /* PLL F Param (bit 22:16) */
1325  fparam = (1400000 / clock) - 2;
1326  VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1327 
1328  /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1329  pci_status = (0x8a001824 | (fparam << 16));
1330  } else
1331  pci_status = PCI_PLL_INIT;
1332 
1333  /* Initialize PLL. */
1334  VPRINTK("pci_status: 0x%x\n", pci_status);
1335  writel(pci_status, mmio + PDC_CTL_STATUS);
1336  readl(mmio + PDC_CTL_STATUS);
1337 
1338  /*
1339  Read SPD of DIMM by I2C interface,
1340  and program the DIMM Module Controller.
1341  */
1342  if (!(speed = pdc20621_detect_dimm(host))) {
1343  printk(KERN_ERR "Detect Local DIMM Fail\n");
1344  return 1; /* DIMM error */
1345  }
1346  VPRINTK("Local DIMM Speed = %d\n", speed);
1347 
1348  /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1349  size = pdc20621_prog_dimm0(host);
1350  VPRINTK("Local DIMM Size = %dMB\n", size);
1351 
1352  /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1353  if (pdc20621_prog_dimm_global(host)) {
1354  printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1355  return 1;
1356  }
1357 
1358 #ifdef ATA_VERBOSE_DEBUG
1359  {
1360  u8 test_parttern1[40] =
1361  {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1362  'N','o','t',' ','Y','e','t',' ',
1363  'D','e','f','i','n','e','d',' ',
1364  '1','.','1','0',
1365  '9','8','0','3','1','6','1','2',0,0};
1366  u8 test_parttern2[40] = {0};
1367 
1368  pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1369  pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1370 
1371  pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1372  pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1373  printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1374  test_parttern2[1], &(test_parttern2[2]));
1375  pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1376  40);
1377  printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1378  test_parttern2[1], &(test_parttern2[2]));
1379 
1380  pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1381  pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1382  printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1383  test_parttern2[1], &(test_parttern2[2]));
1384  }
1385 #endif
1386 
1387  /* ECC initialization. */
1388 
1389  pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1390  PDC_DIMM_SPD_TYPE, &spd0);
1391  if (spd0 == 0x02) {
1392  void *buf;
1393  VPRINTK("Start ECC initialization\n");
1394  addr = 0;
1395  length = size * 1024 * 1024;
1396  buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
      if (!buf)
              return 1;	/* treat allocation failure as a DIMM error */
1397  while (addr < length) {
1398  pdc20621_put_to_dimm(host, buf, addr,
1399  ECC_ERASE_BUF_SZ);
1400  addr += ECC_ERASE_BUF_SZ;
1401  }
1402  kfree(buf);
1403  VPRINTK("Finish ECC initialization\n");
1404  }
1405  return 0;
1406 }
1407 
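/*
 * Worked example of the PLL programming in pdc20621_dimm_init() above,
 * assuming (purely for illustration) that the internal timer clock runs
 * at 66 MHz:
 *
 *   ticks  = 3 s * 66,000,000        = 198,000,000
 *   clock  = 198,000,000 / 300,000   = 660          ("10 * internal clk")
 *   clock *= 33                      -> 21,780
 *   fparam = 1,400,000 / 21,780 - 2  = 62 (0x3E)    (PLL F, bits 22:16)
 *   pci_status = 0x8a001824 | (0x3E << 16) = 0x8A3E1824
 *
 * This computed value is used on PCI-X (tcount >= PCI_X_TCOUNT); on
 * plain PCI the fixed PCI_PLL_INIT constant is written instead.
 */
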
1408 
1409 static void pdc_20621_init(struct ata_host *host)
1410 {
1411  u32 tmp;
1412  void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1413 
1414  /* hard-code chip #0 */
1415  mmio += PDC_CHIP0_OFS;
1416 
1417  /*
1418  * Select page 0x40 for our 32k DIMM window
1419  */
1420  tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1421  tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1422  writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1423 
1424  /*
1425  * Reset Host DMA
1426  */
1427  tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1428  tmp |= PDC_RESET;
1429  writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1430  readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1431 
1432  udelay(10);
1433 
1434  tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1435  tmp &= ~PDC_RESET;
1436  writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1437  readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1438 }
1439 
1440 static int pdc_sata_init_one(struct pci_dev *pdev,
1441  const struct pci_device_id *ent)
1442 {
1443  const struct ata_port_info *ppi[] =
1444  { &pdc_port_info[ent->driver_data], NULL };
1445  struct ata_host *host;
1446  struct pdc_host_priv *hpriv;
1447  int i, rc;
1448 
1449  ata_print_version_once(&pdev->dev, DRV_VERSION);
1450 
1451  /* allocate host */
1452  host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1453  hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1454  if (!host || !hpriv)
1455  return -ENOMEM;
1456 
1457  host->private_data = hpriv;
1458 
1459  /* acquire resources and fill host */
1460  rc = pcim_enable_device(pdev);
1461  if (rc)
1462  return rc;
1463 
1464  rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1465  DRV_NAME);
1466  if (rc == -EBUSY)
1467  pcim_pin_device(pdev);
1468  if (rc)
1469  return rc;
1470  host->iomap = pcim_iomap_table(pdev);
1471 
1472  for (i = 0; i < 4; i++) {
1473  struct ata_port *ap = host->ports[i];
1474  void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1475  unsigned int offset = 0x200 + i * 0x80;
1476 
1477  pdc_sata_setup_port(&ap->ioaddr, base + offset);
1478 
1479  ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1480  ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1481  ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1482  }
1483 
1484  /* configure and activate */
1485  rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1486  if (rc)
1487  return rc;
1488  rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1489  if (rc)
1490  return rc;
1491 
1492  if (pdc20621_dimm_init(host))
1493  return -ENOMEM;
1494  pdc_20621_init(host);
1495 
1496  pci_set_master(pdev);
1497  return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1498  IRQF_SHARED, &pdc_sata_sht);
1499 }
1500 
1501 module_pci_driver(pdc_sata_pci_driver);
1502 
1503 MODULE_AUTHOR("Jeff Garzik");
1504 MODULE_DESCRIPTION("Promise SATA low-level driver");
1505 MODULE_LICENSE("GPL");
1506 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);