#include <linux/device.h>
#include <linux/module.h>
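
/*
 * With CONFIG_MMC_DEBUG enabled, decode an interrupt status word into the
 * names of the TMIO_STAT_* bits that are set in it.
 */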
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)
static void pr_debug_status(u32 status)
{
	int i = 0;

	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}
#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
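
/*
 * Clock divider search: clock starts at f_min (maximum division) and doubles
 * on every iteration while the divider control bit in clk shifts down, so the
 * loop stops at the fastest setting that does not exceed new_clock.
 */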
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
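
/*
 * Variants whose register window is larger than 0x100 bytes also expose the
 * CLK_AND_WAIT and SDIO reset registers, hence this recurring size check in
 * the clock-start/stop and reset paths.
 */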
	if (resource_size(res) > 0x100) {

	if (resource_size(res) > 0x100) {

	if (resource_size(res) > 0x100)

	if (resource_size(res) > 0x100)
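
/*
 * Watchdog for stuck requests: if the request is still active when the
 * delayed work fires, report a timeout and reset the controller.
 */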
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
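
/*
 * Bitmasks the TMIO core requires to encode the MMC response types in the
 * command register; note that R1 and R6 use the same encoding in this scheme.
 */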
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
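
/*
 * PIO transfer path: a PIO interrupt must never run while a DMA channel
 * owns the request.
 */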
		pr_err("PIO IRQ in DMA mode!\n");
	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

		tmio_mmc_next_sg(host);
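
/*
 * After a read through the bounce buffer, copy the received data back into
 * the caller's original scatterlist entry.
 */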
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
	pr_debug("Completed data request\n");

		tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);

		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	spin_lock(&host->lock);

	spin_unlock(&host->lock);

	spin_lock(&host->lock);
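
	/* Pull the command response out of the four 32-bit CTL_RESPONSE registers,
	 * resp[3] sitting at the lowest address. */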
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);
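
	/* For 136-bit (R2) responses the hardware returns the value one byte
	 * down from where the MMC core expects it, so shift the whole 128-bit
	 * result left by 8 bits. */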
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	spin_unlock(&host->lock);
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				     int *ireg, int *status)
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
	unsigned int ireg, status;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);
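
/*
 * SD card interrupt dispatcher: acknowledge and handle command completion,
 * PIO data-ready and data-end events, in that order.
 */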
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		tmio_mmc_pio_irq(host);

		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
	unsigned int ireg, status;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);
	unsigned int ireg, status;

	mmc_signal_sdio_irq(mmc);
	unsigned int ireg, status;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;
	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);
		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}

	tmio_mmc_init_sg(host, data);
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);

	spin_unlock_irqrestore(&host->lock, flags);
		ret = tmio_mmc_start_data(host, mrq->data);

	ret = tmio_mmc_start_command(host, mrq->cmd);
static int tmio_mmc_clk_update(struct mmc_host *mmc)
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

	spin_unlock_irqrestore(&host->lock, flags);
			tmio_mmc_clk_update(mmc);
			pm_runtime_get_sync(dev);

		tmio_mmc_set_clock(host, ios->clock);
		/* power up the SD card and the bus */
		tmio_mmc_set_power(host, ios);
		/* start the bus clock */
		tmio_mmc_clk_start(host);

		/* power down on MMC_POWER_OFF */
		tmio_mmc_set_power(host, ios);

			tmio_mmc_clk_stop(host);
866 "%s.%d: IOS interrupted: clk %u, mode %u",
static int tmio_mmc_get_ro(struct mmc_host *mmc)

static int tmio_mmc_get_cd(struct mmc_host *mmc)
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
	mmc_regulator_get_supply(mmc);

	_host = mmc_priv(mmc);

	platform_set_drvdata(pdev, mmc);
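
	/* SD control register space size is 0x200, 0x400 for bus_shift = 1 */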
	_host->bus_shift = resource_size(res_ctl) >> 10;

	mmc->ops = &tmio_mmc_ops;
	tmio_mmc_init_ocr(_host);

	_host->power = false;

	ret = pm_runtime_resume(&pdev->dev);
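
	/* A negative return means the host clock rate could not be determined;
	 * probe then falls back to the platform-supplied fixed rates. */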
	if (tmio_mmc_clk_update(mmc) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	tmio_mmc_enable_sdio_irq(mmc, 0);
	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
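
/* Both the system-resume and runtime-resume paths reset the controller
 * before it is used again. */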
	tmio_mmc_reset(host);

	tmio_mmc_reset(host);