#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/capability.h>
#include <asm/uaccess.h>
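/*
 * Module parameters are prefixed "mmcblk." so they can also be set on
 * the kernel command line when the driver is built in, e.g.
 * "mmcblk.perdev_minors=16".
 */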
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD		113
#define INAND_CMD38_ARG_ERASE		0x00
#define INAND_CMD38_ARG_TRIM		0x01
#define INAND_CMD38_ARG_SECERASE	0x80
#define INAND_CMD38_ARG_SECTRIM1	0x81
#define INAND_CMD38_ARG_SECTRIM2	0x88
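/*
 * SanDisk iNAND devices take the CMD38 (erase class) argument through
 * EXT_CSD byte 113: the values above are written there with MMC_SWITCH
 * before the erase/trim/secure-erase command is issued.
 */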
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
static int max_devices;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
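/*
 * Request type bits: used with mmc_blk_reset() and
 * mmc_blk_reset_success() to track, per type of request, whether a
 * host reset has already been attempted.
 */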
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
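/*
 * The device index is normally recovered from the minor number; disks
 * allocated from the extended devt range report major 0 here, so those
 * fall back to first_minor instead.
 */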
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
		       md->disk->disk_name, ret);
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);

				pr_info("%s: Locking boot partition ro until next power on\n",
					part_md->disk->disk_name);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
			      const char *buf, size_t count)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	md = mmc_blk_get(bdev->bd_disk);

	card = md->queue.card;

	cmd.arg = idata->ic.arg;
	if (idata->ic.write_flag)
		data.flags = MMC_DATA_WRITE;

	if (idata->ic.data_timeout_ns)
		data.timeout_ns = idata->ic.data_timeout_ns;

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);

						__func__, cmd.error);
						__func__, data.error);

	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us,
			     idata->ic.postsleep_max_us);
	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
			 unsigned int cmd, unsigned long arg)

		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);

				  unsigned int cmd, unsigned long arg)

	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
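/*
 * Userspace reaches mmc_blk_ioctl() through the MMC_IOC_CMD ioctl on
 * the whole-device node (not a partition) and needs CAP_SYS_RAWIO.
 * A minimal sketch of a caller -- not part of this driver; the
 * MMC_RSP_ and MMC_CMD_ flag values mirror the kernel's definitions
 * and are copied by tools such as mmc-utils:
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *	ic.opcode = 13;				// CMD13: SEND_STATUS
 *	ic.arg = rca << 16;			// rca: card relative address
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;	// R1 response, no data
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		printf("card status %#x\n", ic.response[0]);
 */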
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
		u8 part_config = card->ext_csd.part_config;

		card->ext_csd.part_config = part_config;
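/*
 * mmc_sd_num_wr_blocks() below issues ACMD22 (SEND_NUM_WR_BLKS) to ask
 * an SD card how many blocks of the last write actually succeeded; it
 * returns (u32)-1 on failure.
 */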
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)

	cmd.arg = card->rca << 16;

	result = ntohl(*blocks);

	*status = cmd.resp[0];

	cmd.arg = card->rca << 16;
	*status = cmd.resp[0];
#define ERR_NOMEDIUM	3	/* card is gone: fail outstanding requests */
#define ERR_RETRY	2	/* retry the failed request */
#define ERR_ABORT	1	/* abort the request */
#define ERR_CONTINUE	0	/* error recovered, carry on */
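/*
 * mmc_blk_cmd_recovery() classifies a failed r/w request into one of
 * the dispositions above: it re-reads the card status (see the retry
 * loop below), sends a stop command if the card is stuck in a data
 * transfer state, and only gives up when the card is unreachable.
 */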
			  bool status_valid, u32 status)

		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "response CRC error",
		       name, status);

		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "timed out", name, status);

		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
	bool prev_cmd_status_valid = true;

	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);
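	/*
	 * Like the SET_BLOCK_COUNT case above, an error in the r/w
	 * command itself means no data was transferred yet, so the
	 * request can simply be retried.
	 */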
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	if (!brq->stop.error)
		return ERR_CONTINUE;
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

		brq->stop.resp[0] = stop_status;
	part_err = mmc_blk_part_switch(host->card, md);
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
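/*
 * mmc_blk_reset() records each attempt in md->reset_done, so at most
 * one reset per request type is tried; mmc_blk_reset_success() clears
 * the bit once a request of that type completes again.
 */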
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	unsigned int from, nr, arg, trim_arg, erase_arg;

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	 R1_BLOCK_LEN_ERROR |	\
static int mmc_blk_err_check(struct mmc_card *card,

	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||

		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);

			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;
	u32 readcmd, writecmd;

	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
			 (rq_data_dir(req) == WRITE) &&
	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);

	brq->data.blksz = 512;

	brq->data.blocks = blk_rq_sectors(req);
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		if (disable_multi)
			brq->data.blocks = 1;

		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
	} else {
		brq->cmd.opcode = writecmd;
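	/*
	 * readcmd/writecmd were chosen above: multi-block CMD18/CMD25
	 * when brq->data.blocks > 1 (or for reliable writes), otherwise
	 * single-block CMD17/CMD24.
	 */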
		mmc_apply_rel_rw(brq, card, req);

	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |	/* reliable write request */
			(do_data_tag ? (1 << 29) : 0);	/* data tag */

		brq->mrq.sbc = &brq->sbc;
	if (brq->data.blocks != blk_rq_sectors(req)) {

			if (data_size <= 0) {

		brq->data.sg_len = i;

	mqrq->mmc_active.err_check = mmc_blk_err_check;
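	/*
	 * After a failed write, mmc_blk_cmd_err() asks an SD card how
	 * many blocks actually landed (via ACMD22) so the good part of
	 * the request can be completed before the rest is retried or
	 * failed.
	 */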
		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
	int ret = 1, disable_multi = 0, retry = 0, type;
		if ((brq->data.blocks & 0x07) &&
		    (card->ext_csd.data_sector_size == 4096)) {
			pr_err("%s: Transfer size is not 4KB sector size aligned\n",
			       req->rq_disk->disk_name);

	mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_blk_reset_success(md, type);

						brq->data.bytes_xfered);

				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;

			if (!mmc_blk_reset(md, card->host, type))
				break;

			err = mmc_blk_reset(md, card->host, type);
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
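			/*
			 * An ECC failure on a multi-block read is retried
			 * with disable_multi set: every readable sector
			 * still completes, and only the bad sector itself
			 * fails with -EIO.
			 */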
			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);

		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);

	mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req->cmd_flags & REQ_FLUSH) {
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);

		ret = mmc_blk_issue_rw_rq(mq, req);
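	/*
	 * Discard, secure discard and flush are synchronous, so any
	 * ongoing asynchronous transfer is completed first by issuing a
	 * NULL rw request; plain reads and writes go through the async
	 * path.
	 */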
static inline int mmc_blk_readonly(struct mmc_card *card)

					      const char *subname,

	if (devidx >= max_devices)

				dev_to_disk(parent)->private_data)->name_idx;
	INIT_LIST_HEAD(&md->part);

	md->queue.issue_fn = mmc_blk_issue_rq;

	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->driverfs_dev = parent;
1521 "mmcblk%d%s", md->
name_idx, subname ? subname :
"");
1525 card->
ext_csd.data_sector_size);
1529 set_capacity(md->
disk, size);
1531 if (mmc_host_cmd23(card->
host)) {
	return ERR_PTR(ret);

		size = card->csd.capacity << (card->csd.read_blkbits - 9);

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
			       MMC_BLK_DATA_AREA_MAIN);
static int mmc_blk_alloc_part(struct mmc_card *card,
			      const char *subname,

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	list_add(&part_md->part, &md->part);
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
static void mmc_blk_remove_req(struct mmc_blk_data *md)

	card = md->queue.card;
	if (md->disk->flags & GENHD_FL_UP) {

		    card->ext_csd.boot_ro_lockable)
static void mmc_blk_remove_parts(struct mmc_card *card,

		mmc_blk_remove_req(part_md);
	md->force_ro.store = force_ro_store;
	md->force_ro.attr.name = "force_ro";

	    card->ext_csd.boot_ro_lockable) {

			"ro_lock_until_next_power_on";
			goto power_ro_lock_fail;
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15
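/*
 * These manufacturer IDs key the blk_fixups[] quirk table below, e.g.
 * SanDisk iNAND cards that need INAND_CMD38_ARG_EXT_CSD, and cards
 * with broken CMD23 or broken secure erase/trim.
 */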
static const struct mmc_fixup blk_fixups[] =

static int mmc_blk_probe(struct mmc_card *card)
	md = mmc_blk_alloc(card);

			cap_str, sizeof(cap_str));
	if (mmc_blk_alloc_parts(card, md))
		goto out;

	if (mmc_add_disk(md))
		goto out;

		if (mmc_add_disk(part_md))
			goto out;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
static void mmc_blk_remove(struct mmc_card *card)

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_blk_remove_req(md);
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL

	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
static int __init mmc_blk_init(void)

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
	max_devices = 256 / perdev_minors;
static void __exit mmc_blk_exit(void)