Linux Kernel 3.7.1
core.c
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);

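/*
 * Example (hypothetical host-driver code, not part of this file): a host
 * controller driver typically calls mmc_request_done() from its interrupt
 * handler once the hardware has finished the transfer:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *h = dev_id;
 *
 *		foo_collect_response(h, h->mrq->cmd);
 *		mmc_request_done(h->mmc, h->mrq);
 *		return IRQ_HANDLED;
 *	}
 */
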
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: true if this function was called due to an
 *			 exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent bkops status (LEVEL_2 and more)
	 * bkops executed synchronously, otherwise
	 * the operation is in progress
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		       that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the host
 *	prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter; returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for its completion
 *	before starting the new one, and return the completed request.
 *	Does not wait for the new request to complete. If there is no
 *	ongoing request, NULL is returned without waiting; NULL is not
 *	an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

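/*
 * Example (hypothetical block-driver code, not part of this file): the
 * async interface lets a caller prepare request N+1 while request N is
 * still in flight:
 *
 *	prev = mmc_start_req(host, next_areq, &err);
 *	if (prev)
 *		complete_previous_transfer(prev);
 *
 * Passing areq = NULL simply waits for (and returns) any outstanding
 * request, which is how a caller drains the pipeline.
 */
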
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt (HPI)
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);

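/*
 * Example: mmc_do_erase() below uses exactly this helper to poll the card
 * with CMD13 (SEND_STATUS); a minimal sketch of such a call is:
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *
 * The host must be claimed (mmc_claim_host()) before calling.
 */
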
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send an HPI command to stop ongoing background operations, to
 *	allow rapid servicing of foreground operations, e.g. reads and
 *	writes.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is EINVAL, we can't issue an HPI.
	 * It should complete the BKOPS.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

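/*
 * Worked example of the calculation above (values chosen for
 * illustration): a standard-capacity SD card with tacc_ns = 1500000
 * (1.5 ms) and tacc_clks = 0, written to with r2w_factor = 2:
 *
 *	mult       = 100 << 2      = 400
 *	timeout_ns = 1500000 * 400 = 600000000 (600 ms)
 *
 * 600 ms is below the 3 s write limit, so it is kept. For a read,
 * mult = 100 gives 150 ms, which the 100 ms read limit clamps to
 * 100000000 ns. SDHC cards (mmc_card_blockaddr) always take the
 * fixed limit.
 */
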
/**
 *	mmc_align_data_size - pad a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

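/*
 * Example: sz = 13 becomes ((13 + 3) / 4) * 4 = 16, while an already
 * aligned sz = 16 is returned unchanged.
 */
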
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations. If @abort is non null and
 *	dereferences to a non-zero value then the claim is aborted.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_try_claim_host - try to exclusively claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);

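/*
 * The claim/release pair brackets every card access in this file; e.g.
 * mmc_start_bkops() and mmc_interrupt_hpi() above follow the pattern:
 *
 *	mmc_claim_host(card->host);
 *	... issue commands ...
 *	mmc_release_host(card->host);
 *
 * Claims nest (claim_cnt), so the current claimer may claim again
 * without deadlocking against itself.
 */
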
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 *	mmc_vdd_to_ocrbitnum - convert a voltage to the OCR bit number
 *	@vdd: voltage (mV)
 *	@low_bits: prefer low bits in boundary cases
 *
 *	This function returns the OCR bit number according to the provided
 *	@vdd value. If conversion is not possible a negative errno value is
 *	returned.
 *
 *	Depending on the @low_bits flag the function prefers low or high OCR
 *	bits on boundary voltages.
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 *	mmc_vddrange_to_ocrmask - convert a voltage range to the OCR mask
 *	@vdd_min: minimum voltage value (mV)
 *	@vdd_max: maximum voltage value (mV)
 *
 *	This function returns the OCR mask bits according to the provided
 *	@vdd_min and @vdd_max values. If conversion is not possible the
 *	function returns 0.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

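/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400) yields
 *
 *	vdd_max: (3400 - 2000) / 100 + 8 = 22
 *	vdd_min: (3299 - 2000) / 100 + 8 = 20	(low_bits subtracts 1 mV)
 *	mask   : bits 20..22, i.e. MMC_VDD_32_33 | MMC_VDD_33_34 |
 *		 MMC_VDD_34_35
 */
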
#ifdef CONFIG_REGULATOR

/**
 *	mmc_regulator_get_ocrmask - return mask of supported voltages
 *	@supply: regulator to use
 *
 *	This returns either a negative errno, or a mask of voltages that
 *	can be provided to MMC/SD/SDIO devices using the specified voltage
 *	regulator. This would normally be called before registering the
 *	MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 *	mmc_regulator_set_ocr - set regulator to match host->ios voltage
 *	@mmc: the host to regulate
 *	@supply: regulator to use
 *	@vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 *	Returns zero on success, else negative errno.
 *
 *	MMC host drivers may use this to enable or disable a regulator
 *	using a particular supply voltage. This would normally be called
 *	from the set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * If we're using a fixed/static regulator, don't call
		 * regulator_set_voltage; it would fail.
		 */
		voltage = regulator_get_voltage(supply);

		if (regulator_count_voltages(supply) == 1)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */
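
/*
 * Example (hypothetical host-driver probe code, not part of this file):
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 *	... later, from the driver's set_ios() callback ...
 *	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 *
 * mmc_regulator_get_supply() fills mmc->supply.vmmc/vqmmc and, when the
 * regulator reports its voltages, mmc->ocr_avail.
 */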

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
				          unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

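/*
 * Worked example: for an SD card whose SSR does not specify an erase
 * timeout, erasing 8 allocation units uses the 250 ms/block fallback
 * above: erase_timeout = 250 * 8 = 2000 ms, already above the 1000 ms
 * floor, so it is used as-is.
 */
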
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}

/**
 *	mmc_erase - erase sectors.
 *	@card: card to erase
 *	@from: first sector to erase
 *	@nr: number of sectors to erase
 *	@arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 *	Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

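/*
 * Example (hypothetical caller, not part of this file): erase one
 * preferred-erase-size chunk at the start of the card:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, 0, card->pref_erase, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 *
 * With MMC_ERASE_ARG, unaligned head/tail sectors are silently skipped,
 * so @from/@nr are effectively rounded to erase_size boundaries.
 */
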
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	mmc_power_up(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);

#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend) {
			if (mmc_card_doing_bkops(host->card)) {
				err = mmc_stop_bkops(host->card);
				if (err)
					goto out;
			}
			err = host->bus_ops->suspend(host);
		}

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume. (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);

/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (host->card && mmc_card_mmc(host->card) &&
		    mmc_card_doing_bkops(host->card)) {
			err = mmc_stop_bkops(host->card);
			if (err) {
				pr_err("%s: didn't stop bkops\n",
					mmc_hostname(host));
				return err;
			}
			mmc_card_clr_doing_bkops(host->card);
		}

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");