sh_mmcif.c
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver is now processing MMC requests asynchronously, according
 * to the Linux MMC API requirement.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */
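
/*
 * A rough sketch of the resulting flow for a multi-block write with a stop
 * command, assuming PIO (with DMA the per-block interrupts are replaced by
 * a single DMA completion):
 *
 *   sh_mmcif_request() -> sh_mmcif_start_cmd()           top half, timeout work armed
 *     IRQ -> sh_mmcif_end_cmd() -> sh_mmcif_multi_write() next top half
 *       IRQ -> sh_mmcif_mwrite_block() ...                one block per interrupt
 *     IRQ -> sh_mmcif_stop_cmd()                          top half for the CMD12 stage
 *       IRQ -> mmc_request_done()
 */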

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
};

enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_data *data = host->mrq->data;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	int ret;

	host->dma_active = false;

	if (!pdata)
		return;

	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_tx);
	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (!host->chan_tx)
		return;

	cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto ecfgtx;

	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
					    (void *)pdata->slave_id_rx);
	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (!host->chan_rx)
		goto erqrx;

	cfg.slave_id = pdata->slave_id_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = res->start + MMCIF_CE_DATA;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto ecfgrx;

	init_completion(&host->dma_complete);

	return;

ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
erqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

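/*
 * The CLK_CTRL divider field computed below apparently selects a
 * power-of-two prescaler: a field value of x divides the source clock by
 * 2^(x+1). E.g. with host->clk = 52 MHz and a requested 400 kHz:
 * DIV_ROUND_UP(52000000, 400000) - 1 = 129, fls(129) - 1 = 7, so the bus
 * clock becomes 52 MHz / 2^8 = ~203 kHz - the fastest setting that does
 * not exceed the request.
 */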
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, ": CRC error\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, ": Timeout\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, ": End/Index error\n");
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	if (host->sg_idx == data->sg_len)
		return false;

	return true;
}
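
/*
 * Example of the PIO scatterlist walk above: with a 512-byte block size
 * and a 4 KiB scatterlist entry, sh_mmcif_next_block() is called once per
 * block and advances sg_blkidx by 512 each time; after the eighth block
 * it moves pio_ptr to the start of the next scatterlist entry.
 */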

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
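
/*
 * Example: for CMD18 (MMC_READ_MULTIPLE_BLOCK) on a 4-bit bus the value
 * built above is (18 << 24) | CMD_SET_RTYP_6B (R1 response) |
 * CMD_SET_WDAT | CMD_SET_DATW_4 | CMD_SET_CMLTE | CMD_SET_CMD12EN -
 * a multi-block read with automatic CMD12 issue; CMD_SET_DWEN stays
 * clear, making it a read transfer.
 */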

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	switch (opc) {
	/* response busy check */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO command */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
	case MMC_APP_CMD:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
	int ret = clk_enable(host->hclk);

	if (!ret) {
		host->clk = clk_get_rate(host->hclk);
		host->mmc->f_max = host->clk / 2;
		host->mmc->f_min = host->clk / 512;
	}

	return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	if (pd && pd->set_pwr)
		pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put(&host->pd->dev);
			clk_disable(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			host->sd_error = false;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
				cmd->opcode, cmd->error);
			break;
		}
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		if (!data->error)
			return true;
		return false;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);
	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error)
		data->bytes_xfered = 0;

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;

	cancel_delayed_work_sync(&host->timeout_work);

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		if (sh_mmcif_end_cmd(host))
			/* Wait for data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MREAD:
		if (sh_mmcif_mread_block(host))
			/* Wait for more data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_READ:
		if (sh_mmcif_read_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		if (sh_mmcif_mwrite_block(host))
			/* Wait data to write */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_WRITE:
		if (sh_mmcif_write_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error)
			mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error)
				return IRQ_HANDLED;
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	return IRQ_HANDLED;
}

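/*
 * The CE_INT status bits appear to be write-zero-to-clear: the handler
 * below acknowledges events by writing ~bits, clearing exactly the bits
 * it is about to handle while leaving all others pending.
 */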
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_ERR_STS) {
		/* error interrupts - process first */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12DRE | INT_CMD12RBE |
				  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12DRE | INT_CMD12RBE |
				  INT_CMD12CRE | INT_DTRANE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else {
		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto ealloch;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n", clk_name, ret);
		goto eclkget;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto eclkupdate;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto eresume;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto ereqirq0;
	}
	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto ereqirq1;
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
		if (ret < 0)
			goto erqcd;
	}

	clk_disable(host->hclk);
	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto emmcaddh;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

emmcaddh:
	if (pd && pd->use_cd_gpio)
		mmc_gpio_free_cd(mmc);
erqcd:
	free_irq(irq[1], host);
ereqirq1:
	free_irq(irq[0], host);
ereqirq0:
	pm_runtime_suspend(&pdev->dev);
eresume:
	clk_disable(host->hclk);
eclkupdate:
	clk_put(host->hclk);
eclkget:
	pm_runtime_disable(&pdev->dev);
	mmc_free_host(mmc);
ealloch:
	iounmap(reg);
	return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	int irq[2];

	host->dying = true;
	clk_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	if (pd && pd->use_cd_gpio)
		mmc_gpio_free_cd(host->mmc);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */

static const struct of_device_id mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.owner	= THIS_MODULE,
		.of_match_table = mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <[email protected]>");
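
/*
 * The block below is a usage sketch, not part of the original driver: it
 * shows roughly how a board file could instantiate this device through
 * platform data (DMA slave IDs, OCR mask, host caps) and MEM/IRQ
 * resources, matching what sh_mmcif_probe() expects. All addresses, IRQ
 * numbers and slave IDs in it are illustrative.
 */
#if 0
static struct sh_mmcif_plat_data sh_mmcif_plat = {
	.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
	.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,	/* illustrative slave IDs */
	.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
};

static struct resource sh_mmcif_resources[] = {
	[0] = {
		.start	= 0xe4ca0000,	/* illustrative register window */
		.end	= 0xe4ca00ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 122,		/* error IRQ (illustrative) */
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= 123,		/* normal IRQ (illustrative) */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sh_mmcif_device = {
	.name		= "sh_mmcif",	/* must match DRIVER_NAME */
	.id		= 0,		/* probe derives the clock name "mmc0" */
	.dev		= {
		.platform_data	= &sh_mmcif_plat,
	},
	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
	.resource	= sh_mmcif_resources,
};
#endif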