Linux Kernel 3.7.1
pata_ep93xx.c
/*
 * EP93XX PATA controller driver.
 *
 * Copyright (c) 2012, Metasoft s.c.
 *	Rafal Prylowski <[email protected]>
 *
 * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX
 * PATA driver by Lennert Buytenhek and Alessandro Zummo.
 * Read/Write timings, resource management and other improvements
 * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz.
 * DMA engine support based on spi-ep93xx.c by Mika Westerberg.
 *
 * Original copyrights:
 *
 * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs
 * PATA host controller driver.
 *
 * Copyright (c) 2009, Bartlomiej Zolnierkiewicz
 *
 * Heavily based on the ep93xx-ide.c driver:
 *
 * Copyright (c) 2009, Joao Ramos <[email protected]>
 *	INESC Inovacao (INOV)
 *
 * EP93XX PATA controller driver.
 * Copyright (C) 2007 Lennert Buytenhek <[email protected]>
 *
 * An ATA driver for the Cirrus Logic EP93xx PATA controller.
 *
 * Based on an earlier version by Alessandro Zummo, which is:
 *	Copyright (C) 2006 Tower Technologies
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <mach/platform.h>

#define DRV_NAME	"ep93xx-ide"
#define DRV_VERSION	"1.0"

enum {
	/* IDE Control Register */
	IDECTRL				= 0x00,
	IDECTRL_CS0N			= (1 << 0),
	IDECTRL_CS1N			= (1 << 1),
	IDECTRL_DIORN			= (1 << 5),
	IDECTRL_DIOWN			= (1 << 6),
	IDECTRL_INTRQ			= (1 << 9),
	IDECTRL_IORDY			= (1 << 10),
	/*
	 * the device IDE register to be accessed is selected through
	 * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
	 *   b4    b3    b2    b1    b0
	 *   A2    A1    A0    CS1N  CS0N
	 * the values filled in this structure allow the value to be directly
	 * ORed to the IDECTRL register, hence giving directly the A[2:0] and
	 * CS1N/CS0N values for each IDE register.
	 * The values correspond to the transformation:
	 *    ((real IDE address) << 2) | CS1N value << 1 | CS0N value
	 */
	IDECTRL_ADDR_CMD		= 0 + 2, /* CS1 */
	IDECTRL_ADDR_DATA		= (ATA_REG_DATA << 2) + 2,
	IDECTRL_ADDR_ERROR		= (ATA_REG_ERR << 2) + 2,
	IDECTRL_ADDR_FEATURE		= (ATA_REG_FEATURE << 2) + 2,
	IDECTRL_ADDR_NSECT		= (ATA_REG_NSECT << 2) + 2,
	IDECTRL_ADDR_LBAL		= (ATA_REG_LBAL << 2) + 2,
	IDECTRL_ADDR_LBAM		= (ATA_REG_LBAM << 2) + 2,
	IDECTRL_ADDR_LBAH		= (ATA_REG_LBAH << 2) + 2,
	IDECTRL_ADDR_DEVICE		= (ATA_REG_DEVICE << 2) + 2,
	IDECTRL_ADDR_STATUS		= (ATA_REG_STATUS << 2) + 2,
	IDECTRL_ADDR_COMMAND		= (ATA_REG_CMD << 2) + 2,
	IDECTRL_ADDR_ALTSTATUS		= (0x06 << 2) + 1, /* CS0 */
	IDECTRL_ADDR_CTL		= (0x06 << 2) + 1, /* CS0 */
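
	/*
	 * Worked example of the transformation above, taking the register
	 * index values from <linux/ata.h> (ATA_REG_DEVICE is 6 there):
	 * IDECTRL_ADDR_DEVICE = (6 << 2) | (1 << 1) | 0 = 0x1a,
	 * i.e. DA[2:0] = 110b with CS1N = 1 and CS0N = 0.
	 */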

	/* IDE Configuration Register */
	IDECFG				= 0x04,
	IDECFG_IDEEN			= (1 << 0),
	IDECFG_PIO			= (1 << 1),
	IDECFG_MDMA			= (1 << 2),
	IDECFG_UDMA			= (1 << 3),
	IDECFG_MODE_SHIFT		= 4,
	IDECFG_MODE_MASK		= (0xf << 4),
	IDECFG_WST_SHIFT		= 8,
	IDECFG_WST_MASK			= (0x3 << 8),

	/* MDMA Operation Register */
	IDEMDMAOP			= 0x08,

	/* UDMA Operation Register */
	IDEUDMAOP			= 0x0c,
	IDEUDMAOP_UEN			= (1 << 0),
	IDEUDMAOP_RWOP			= (1 << 1),

	/* PIO/MDMA/UDMA Data Registers */
	IDEDATAOUT			= 0x10,
	IDEDATAIN			= 0x14,
	IDEMDMADATAOUT			= 0x18,
	IDEMDMADATAIN			= 0x1c,
	IDEUDMADATAOUT			= 0x20,
	IDEUDMADATAIN			= 0x24,

	/* UDMA Status Register */
	IDEUDMASTS			= 0x28,
	IDEUDMASTS_DMAIDE		= (1 << 16),
	IDEUDMASTS_INTIDE		= (1 << 17),
	IDEUDMASTS_SBUSY		= (1 << 18),
	IDEUDMASTS_NDO			= (1 << 24),
	IDEUDMASTS_NDI			= (1 << 25),
	IDEUDMASTS_N4X			= (1 << 26),

	/* UDMA Debug Status Register */
	IDEUDMADEBUG			= 0x2c,
};

struct ep93xx_pata_data {
	const struct platform_device *pdev;
	void __iomem *ide_base;
	struct ata_timing t;
	bool iordy;

	unsigned long udma_in_phys;
	unsigned long udma_out_phys;

	struct dma_chan *dma_rx_channel;
	struct ep93xx_dma_data dma_rx_data;
	struct dma_chan *dma_tx_channel;
	struct ep93xx_dma_data dma_tx_data;
};

static void ep93xx_pata_clear_regs(void __iomem *base)
{
	writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN |
		IDECTRL_DIOWN, base + IDECTRL);

	writel(0, base + IDECFG);
	writel(0, base + IDEMDMAOP);
	writel(0, base + IDEUDMAOP);
	writel(0, base + IDEDATAOUT);
	writel(0, base + IDEDATAIN);
	writel(0, base + IDEMDMADATAOUT);
	writel(0, base + IDEMDMADATAIN);
	writel(0, base + IDEUDMADATAOUT);
	writel(0, base + IDEUDMADATAIN);
	writel(0, base + IDEUDMADEBUG);
}

static bool ep93xx_pata_check_iordy(void __iomem *base)
{
	return !!(readl(base + IDECTRL) & IDECTRL_IORDY);
}

/*
 * According to EP93xx User's Guide, WST field of IDECFG specifies number
 * of HCLK cycles to hold the data bus after a PIO write operation.
 * It should be programmed to guarantee following delays:
 *
 * PIO Mode   [ns]
 * 0          30
 * 1          20
 * 2          15
 * 3          10
 * 4          5
 *
 * Maximum possible value for HCLK is 100MHz.
 */
static int ep93xx_pata_get_wst(int pio_mode)
{
	int val;

	if (pio_mode == 0)
		val = 3;
	else if (pio_mode < 3)
		val = 2;
	else
		val = 1;

	return val << IDECFG_WST_SHIFT;
}
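
/*
 * Worked example for the WST table above: at the maximum HCLK of 100MHz one
 * cycle lasts 10ns, so the value 3 used for PIO0 holds the bus for 30ns,
 * 2 for PIO1/PIO2 gives 20ns, and 1 for PIO3/PIO4 gives 10ns, each meeting
 * the minimum hold time of the respective mode. A slower HCLK only makes
 * the hold longer, so these values stay on the safe side.
 */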

static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode)
{
	writel(IDECFG_IDEEN | IDECFG_PIO |
		ep93xx_pata_get_wst(pio_mode) |
		(pio_mode << IDECFG_MODE_SHIFT), base + IDECFG);
}

/*
 * Based on delay loop found in mach-pxa/mp900.c.
 *
 * Single iteration should take 5 cpu cycles. This is 25ns assuming the
 * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings
 * than e.g. 20ns.
 */
static void ep93xx_pata_delay(unsigned long count)
{
	__asm__ volatile (
		"0:\n"
		"mov r0, r0\n"
		"subs %0, %1, #1\n"
		"bge 0b\n"
		: "=r" (count)
		: "0" (count)
	);
}

static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
						unsigned long t2)
{
	/*
	 * According to ATA specification, IORDY pin can be first sampled
	 * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
	 * width is tB = 1250ns.
	 *
	 * We are already t2 delay loop iterations after activation of
	 * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
	 * delay loop iterations.
	 */
	unsigned long start = (1250 + 35) / 25 - t2;
	unsigned long counter = start;

	while (!ep93xx_pata_check_iordy(base) && counter--)
		ep93xx_pata_delay(1);
	return start - counter;
}
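
/*
 * For reference, with the 25ns-per-iteration assumption documented at
 * ep93xx_pata_delay(), the timeout above works out to
 * (1250 + 35) / 25 = 51 delay-loop iterations, less the t2 iterations
 * already spent with DIOR-/DIOW- asserted.
 */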

/* common part at start of ep93xx_pata_read/write() */
static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr,
				 unsigned long t1)
{
	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
	ep93xx_pata_delay(t1);
}

/* common part at end of ep93xx_pata_read/write() */
static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
			       bool iordy, unsigned long t0, unsigned long t2,
			       unsigned long t2i)
{
	ep93xx_pata_delay(t2);
	/* lengthen t2 if needed */
	if (iordy)
		t2 += ep93xx_pata_wait_for_iordy(base, t2);
	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
	if (t0 > t2 && t0 - t2 > t2i)
		ep93xx_pata_delay(t0 - t2);
	else
		ep93xx_pata_delay(t2i);
}

static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
			    unsigned long addr,
			    bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t->setup);
	writel(IDECTRL_DIOWN | addr, base + IDECTRL);
	/*
	 * The IDEDATAIN register is loaded from the DD pins at the positive
	 * edge of the DIORN signal. (EP93xx UG p27-14)
	 */
	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
	return readl(base + IDEDATAIN);
}

/* IDE register read */
static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data,
				unsigned long addr)
{
	return ep93xx_pata_read(drv_data, addr, true);
}

/* PIO data read */
static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data,
				 unsigned long addr)
{
	return ep93xx_pata_read(drv_data, addr, false);
}

static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
			      u16 value, unsigned long addr,
			      bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t->setup);
	/*
	 * Value from IDEDATAOUT register is driven onto the DD pins when
	 * DIOWN is low. (EP93xx UG p27-13)
	 */
	writel(value, base + IDEDATAOUT);
	writel(IDECTRL_DIORN | addr, base + IDECTRL);
	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
}

/* IDE register write */
static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data,
				  u16 value, unsigned long addr)
{
	ep93xx_pata_write(drv_data, value, addr, true);
}

/* PIO data write */
static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data,
				   u16 value, unsigned long addr)
{
	ep93xx_pata_write(drv_data, value, addr, false);
}

static void ep93xx_pata_set_piomode(struct ata_port *ap,
				    struct ata_device *adev)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	struct ata_device *pair = ata_dev_pair(adev);
	/*
	 * Calculate timings for the delay loop, assuming ep93xx cpu speed
	 * is 200MHz (maximum possible for ep93xx). If actual cpu speed is
	 * slower, we will wait a bit longer in each delay.
	 * Additional division of cpu speed by 5, because single iteration
	 * of our delay loop takes 5 cpu cycles (25ns).
	 */
	unsigned long T = 1000000 / (200 / 5);

	ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
	if (pair && pair->pio_mode) {
		struct ata_timing t;
		ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
		ata_timing_merge(&t, &drv_data->t, &drv_data->t,
				 ATA_TIMING_SETUP | ATA_TIMING_8BIT);
	}
	drv_data->iordy = ata_pio_need_iordy(adev);

	ep93xx_pata_enable_pio(drv_data->ide_base,
			       adev->pio_mode - XFER_PIO_0);
}
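
/*
 * For reference, the arithmetic above gives T = 1000000 / (200 / 5) = 25000.
 * Given that ata_timing_compute() quantizes nanosecond timings in units of
 * T/1000 (see libata-core.c), one timing unit here is 25ns, i.e. exactly one
 * iteration of ep93xx_pata_delay(), so the values stored in drv_data->t can
 * be fed straight to the delay loop by ep93xx_pata_rw_begin()/_rw_end().
 */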

/* Note: original code is ata_sff_check_status */
static u8 ep93xx_pata_check_status(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS);
}

static u8 ep93xx_pata_check_altstatus(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS);
}

/* Note: original code is ata_sff_tf_load */
static void ep93xx_pata_tf_load(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		ep93xx_pata_write_reg(drv_data, tf->hob_feature,
			IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, tf->hob_nsect,
			IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbal,
			IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbam,
			IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbah,
			IDECTRL_ADDR_LBAH);
	}

	if (is_addr) {
		ep93xx_pata_write_reg(drv_data, tf->feature,
			IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		ep93xx_pata_write_reg(drv_data, tf->device,
			IDECTRL_ADDR_DEVICE);

	ata_wait_idle(ap);
}

/* Note: original code is ata_sff_tf_read */
static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	tf->command = ep93xx_pata_check_status(ap);
	tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
	tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
	tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
	tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
	tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH);
	tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE);

	if (tf->flags & ATA_TFLAG_LBA48) {
		ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB,
			IDECTRL_ADDR_CTL);
		tf->hob_feature = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_FEATURE);
		tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_NSECT);
		tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAL);
		tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAM);
		tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAH);
		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
	}
}

/* Note: original code is ata_sff_exec_command */
static void ep93xx_pata_exec_command(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, tf->command,
			      IDECTRL_ADDR_COMMAND);
	ata_sff_pause(ap);
}

/* Note: original code is ata_sff_dev_select */
static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u8 tmp = ATA_DEVICE_OBS;

	if (device != 0)
		tmp |= ATA_DEV1;

	ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}

/* Note: original code is ata_sff_set_devctl */
static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL);
}

/* Note: original code is ata_sff_data_xfer */
static unsigned int ep93xx_pata_data_xfer(struct ata_device *adev,
					  unsigned char *buf,
					  unsigned int buflen, int rw)
{
	struct ata_port *ap = adev->link->ap;
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u16 *data = (u16 *)buf;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	while (words--)
		if (rw == READ)
			*data++ = cpu_to_le16(
				ep93xx_pata_read_data(
					drv_data, IDECTRL_ADDR_DATA));
		else
			ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++),
				IDECTRL_ADDR_DATA);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		buf += buflen - 1;

		if (rw == READ) {
			*pad = cpu_to_le16(
				ep93xx_pata_read_data(
					drv_data, IDECTRL_ADDR_DATA));
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad),
				IDECTRL_ADDR_DATA);
		}
		words++;
	}

	return words << 1;
}

/* Note: original code is ata_devchk */
static bool ep93xx_pata_device_is_present(struct ata_port *ap,
					  unsigned int device)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);

	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT);
	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL);

	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);

	nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
	lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return true;

	return false;
}

/* Note: original code is ata_sff_wait_after_reset */
static int ep93xx_pata_wait_after_reset(struct ata_link *link,
					unsigned int devmask,
					unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/*
	 * -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/*
	 * if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/*
		 * Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ep93xx_pata_read_reg(drv_data,
				IDECTRL_ADDR_NSECT);
			lbal = ep93xx_pata_read_reg(drv_data,
				IDECTRL_ADDR_LBAL);
			if (nsect == 1 && lbal == 1)
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}
	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}

/* Note: original code is ata_bus_softreset */
static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
				     unsigned long deadline)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
	ap->last_ctl = ap->ctl;

	return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline);
}

static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
{
	if (drv_data->dma_rx_channel) {
		dma_release_channel(drv_data->dma_rx_channel);
		drv_data->dma_rx_channel = NULL;
	}
	if (drv_data->dma_tx_channel) {
		dma_release_channel(drv_data->dma_tx_channel);
		drv_data->dma_tx_channel = NULL;
	}
}

static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
	const struct platform_device *pdev = drv_data->pdev;
	dma_cap_mask_t mask;
	struct dma_slave_config conf;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * Request two channels for IDE. Another possibility would be
	 * to request only one channel, and reprogram its direction at
	 * start of new transfer.
	 */
	drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
	drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
	drv_data->dma_rx_data.name = "ep93xx-pata-rx";
	drv_data->dma_rx_channel = dma_request_channel(mask,
		ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
	if (!drv_data->dma_rx_channel)
		return;

	drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
	drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
	drv_data->dma_tx_data.name = "ep93xx-pata-tx";
	drv_data->dma_tx_channel = dma_request_channel(mask,
		ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
	if (!drv_data->dma_tx_channel) {
		dma_release_channel(drv_data->dma_rx_channel);
		drv_data->dma_rx_channel = NULL;
		return;
	}

	/* Configure receive channel direction and source address */
	memset(&conf, 0, sizeof(conf));
	conf.direction = DMA_FROM_DEVICE;
	conf.src_addr = drv_data->udma_in_phys;
	conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
		dev_err(&pdev->dev, "failed to configure rx dma channel\n");
		ep93xx_pata_release_dma(drv_data);
		return;
	}

	/* Configure transmit channel direction and destination address */
	memset(&conf, 0, sizeof(conf));
	conf.direction = DMA_TO_DEVICE;
	conf.dst_addr = drv_data->udma_out_phys;
	conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
		dev_err(&pdev->dev, "failed to configure tx dma channel\n");
		ep93xx_pata_release_dma(drv_data);
	}
}

static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
{
	struct dma_async_tx_descriptor *txd;
	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
	void __iomem *base = drv_data->ide_base;
	struct ata_device *adev = qc->dev;
	u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0;
	struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
		? drv_data->dma_tx_channel : drv_data->dma_rx_channel;

	txd = channel->device->device_prep_slave_sg(channel, qc->sg,
		qc->n_elem, qc->dma_dir, DMA_CTRL_ACK, NULL);
	if (!txd) {
		dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
		return;
	}
	txd->callback = NULL;
	txd->callback_param = NULL;

	if (dmaengine_submit(txd) < 0) {
		dev_err(qc->ap->dev, "failed to submit dma transfer\n");
		return;
	}
	dma_async_issue_pending(channel);

	/*
	 * When enabling UDMA operation, IDEUDMAOP register needs to be
	 * programmed in three step sequence:
	 * 1) set or clear the RWOP bit,
	 * 2) perform dummy read of the register,
	 * 3) set the UEN bit.
	 */
	writel(v, base + IDEUDMAOP);
	readl(base + IDEUDMAOP);
	writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP);

	writel(IDECFG_IDEEN | IDECFG_UDMA |
		((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT),
		base + IDECFG);
}
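
/*
 * Example of the final IDECFG write above for a UDMA2 command
 * (adev->xfer_mode == XFER_UDMA_2, so the mode field is 2):
 * IDECFG_IDEEN | IDECFG_UDMA | (2 << IDECFG_MODE_SHIFT)
 *	= 0x01 | 0x08 | 0x20 = 0x29.
 */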

static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
{
	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
	void __iomem *base = drv_data->ide_base;

	/* terminate all dma transfers, if not yet finished */
	dmaengine_terminate_all(drv_data->dma_rx_channel);
	dmaengine_terminate_all(drv_data->dma_tx_channel);

	/*
	 * To properly stop IDE-DMA, IDEUDMAOP register must be cleared
	 * and IDECTRL register must be set to default value.
	 */
	writel(0, base + IDEUDMAOP);
	writel(IDECTRL_DIOWN | IDECTRL_DIORN |
		IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL);

	ep93xx_pata_enable_pio(drv_data->ide_base,
		qc->dev->pio_mode - XFER_PIO_0);

	ata_sff_dma_pause(qc->ap);
}

static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

static u8 ep93xx_pata_dma_status(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u32 val = readl(drv_data->ide_base + IDEUDMASTS);

	/*
	 * UDMA Status Register bits:
	 *
	 * DMAIDE - DMA request signal from UDMA state machine,
	 * INTIDE - INT line generated by UDMA because of errors in the
	 *          state machine,
	 * SBUSY  - UDMA state machine busy, not in idle state,
	 * NDO    - error for data-out not completed,
	 * NDI    - error for data-in not completed,
	 * N4X    - error for data transferred not multiples of four
	 *          32-bit words.
	 * (EP93xx UG p27-17)
	 */
	if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI ||
	    val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE)
		return ATA_DMA_ERR;

	/* read INTRQ (INT[3]) pin input state */
	if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ)
		return ATA_DMA_INTR;

	if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE)
		return ATA_DMA_ACTIVE;

	return 0;
}
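
/*
 * Example decode of the checks above: an IDEUDMASTS value of 0x00040000
 * (only SBUSY set) with the INTRQ pin low yields ATA_DMA_ACTIVE, while any
 * of NDO/NDI/N4X/INTIDE forces ATA_DMA_ERR before INTRQ or the busy bits
 * are even considered.
 */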

/* Note: original code is ata_sff_softreset */
static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = al->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ep93xx_pata_device_is_present(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ep93xx_pata_device_is_present(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(al->ap, 0);

	/* issue bus reset */
	rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
		ata_link_printk(al, KERN_ERR, "SRST failed (errno=%d)\n",
				rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0),
					  &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&al->device[1],
						  devmask & (1 << 1), &err);

	return 0;
}

/* Note: original code is ata_sff_drain_fifo */
static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;
	struct ep93xx_pata_data *drv_data;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	drv_data = ap->host->private_data;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
		     && count < 65536; count += 2)
		ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);

	/* Can become DEBUG later */
	if (count)
		ata_port_printk(ap, KERN_DEBUG,
			"drained %d bytes to clear DRQ.\n", count);

}

static int ep93xx_pata_port_start(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	/*
	 * Set timings to safe values at startup (= number of ns from ATA
	 * specification), we'll switch to properly calculated values later.
	 */
	drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
	return 0;
}

static struct scsi_host_template ep93xx_pata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/* ep93xx dma implementation limit */
	.sg_tablesize		= 32,
	/* ep93xx dma can't transfer 65536 bytes at once */
	.dma_boundary		= 0x7fff,
};

static struct ata_port_operations ep93xx_pata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.qc_prep		= ata_noop_qc_prep,

	.softreset		= ep93xx_pata_softreset,
	.hardreset		= ATA_OP_NULL,

	.sff_dev_select		= ep93xx_pata_dev_select,
	.sff_set_devctl		= ep93xx_pata_set_devctl,
	.sff_check_status	= ep93xx_pata_check_status,
	.sff_check_altstatus	= ep93xx_pata_check_altstatus,
	.sff_tf_load		= ep93xx_pata_tf_load,
	.sff_tf_read		= ep93xx_pata_tf_read,
	.sff_exec_command	= ep93xx_pata_exec_command,
	.sff_data_xfer		= ep93xx_pata_data_xfer,
	.sff_drain_fifo		= ep93xx_pata_drain_fifo,
	.sff_irq_clear		= ATA_OP_NULL,

	.set_piomode		= ep93xx_pata_set_piomode,

	.bmdma_setup		= ep93xx_pata_dma_setup,
	.bmdma_start		= ep93xx_pata_dma_start,
	.bmdma_stop		= ep93xx_pata_dma_stop,
	.bmdma_status		= ep93xx_pata_dma_status,

	.cable_detect		= ata_cable_unknown,
	.port_start		= ep93xx_pata_port_start,
};

static int __devinit ep93xx_pata_probe(struct platform_device *pdev)
{
	struct ep93xx_pata_data *drv_data;
	struct ata_host *host;
	struct ata_port *ap;
	int irq;
	struct resource *mem_res;
	void __iomem *ide_base;
	int err;

	err = ep93xx_ide_acquire_gpio(pdev);
	if (err)
		return err;

	/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENXIO;
		goto err_rel_gpio;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		err = -ENXIO;
		goto err_rel_gpio;
	}

	ide_base = devm_request_and_ioremap(&pdev->dev, mem_res);
	if (!ide_base) {
		err = -ENXIO;
		goto err_rel_gpio;
	}

	drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
	if (!drv_data) {
		err = -ENXIO;
		goto err_rel_gpio;
	}

	platform_set_drvdata(pdev, drv_data);
	drv_data->pdev = pdev;
	drv_data->ide_base = ide_base;
	drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
	drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
	ep93xx_pata_dma_init(drv_data);

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		err = -ENXIO;
		goto err_rel_dma;
	}

	ep93xx_pata_clear_regs(ide_base);

	host->private_data = drv_data;

	ap = host->ports[0];
	ap->dev = &pdev->dev;
	ap->ops = &ep93xx_pata_port_ops;
	ap->flags |= ATA_FLAG_SLAVE_POSS;
	ap->pio_mask = ATA_PIO4;

	/*
	 * Maximum UDMA modes:
	 * EP931x rev.E0 - UDMA2
	 * EP931x rev.E1 - UDMA3
	 * EP931x rev.E2 - UDMA4
	 *
	 * MWDMA support was removed from EP931x rev.E2,
	 * so this driver supports only UDMA modes.
	 */
	if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
		int chip_rev = ep93xx_chip_revision();

		if (chip_rev == EP93XX_CHIP_REV_E1)
			ap->udma_mask = ATA_UDMA3;
		else if (chip_rev == EP93XX_CHIP_REV_E2)
			ap->udma_mask = ATA_UDMA4;
		else
			ap->udma_mask = ATA_UDMA2;
	}

	/* defaults, pio 0 */
	ep93xx_pata_enable_pio(ide_base, 0);

	dev_info(&pdev->dev, "version " DRV_VERSION "\n");

	/* activate host */
	err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0,
				&ep93xx_pata_sht);
	if (err == 0)
		return 0;

err_rel_dma:
	ep93xx_pata_release_dma(drv_data);
err_rel_gpio:
	ep93xx_ide_release_gpio(pdev);
	return err;
}

static int __devexit ep93xx_pata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct ep93xx_pata_data *drv_data = host->private_data;

	ata_host_detach(host);
	ep93xx_pata_release_dma(drv_data);
	ep93xx_pata_clear_regs(drv_data->ide_base);
	ep93xx_ide_release_gpio(pdev);
	return 0;
}

static struct platform_driver ep93xx_pata_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ep93xx_pata_probe,
	.remove = __devexit_p(ep93xx_pata_remove),
};

module_platform_driver(ep93xx_pata_platform_driver);

MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
		"Bartlomiej Zolnierkiewicz, Rafal Prylowski");
MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:pata_ep93xx");