#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <linux/device.h>
#include <asm/uaccess.h>

#define DRIVER_NAME "pktcdvd"
#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif
#define MAX_SPEED 0xffff

#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
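/*
 * ZONE() rounds a sector down to the first sector of the packet zone
 * containing it. The mask only works because pd->settings.size (the
 * zone size in 512-byte sectors) is a power of two: e.g. with offset 0
 * and size 128, ZONE(130, pd) == 128.
 */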
static int pktdev_major;
static struct mutex ctl_mutex;
static struct class *class_pktcdvd = NULL;
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
#define DEF_ATTR(_obj,_name,_mode) \
static struct attribute _obj = { .name = _name, .mode = _mode }
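/*
 * Per-device sysfs attributes, exposed through the "stat" and
 * "write_queue" kobjects created via pkt_kobj_create() further down
 * (e.g. /sys/class/pktcdvd/pktcdvd0/stat/kb_written). The modes are
 * the usual read-only (0444), read-write (0644) and write-only (0200)
 * file permissions.
 */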
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);
static struct attribute *kobj_pkt_attrs_stat[] = {
        &kobj_pkt_attr_st1,
        &kobj_pkt_attr_st2,
        &kobj_pkt_attr_st3,
        &kobj_pkt_attr_st4,
        &kobj_pkt_attr_st5,
        &kobj_pkt_attr_st6,
        NULL
};
DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);
static struct attribute *kobj_pkt_attrs_wqueue[] = {
        &kobj_pkt_attr_wq1,
        &kobj_pkt_attr_wq2,
        &kobj_pkt_attr_wq3,
        NULL
};
        if (strcmp(attr->name, "packets_started") == 0) {
        } else if (strcmp(attr->name, "packets_finished") == 0) {
        } else if (strcmp(attr->name, "kb_written") == 0) {
        } else if (strcmp(attr->name, "kb_read") == 0) {
        } else if (strcmp(attr->name, "kb_read_gather") == 0) {
                spin_lock(&pd->lock);
                spin_unlock(&pd->lock);
        } else if (strcmp(attr->name, "congestion_off") == 0) {
                spin_lock(&pd->lock);
                spin_unlock(&pd->lock);
        } else if (strcmp(attr->name, "congestion_on") == 0) {
                spin_lock(&pd->lock);
                spin_unlock(&pd->lock);
static void init_write_congestion_marks(int* lo, int* hi)

        *hi = min(*hi, 1000000);
        *lo = min(*lo, *hi - 100);
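/*
 * The clamps keep the marks sane: the "on" mark is capped at one
 * million and the "off" mark is forced to sit at least 100 below it,
 * so the two thresholds can never invert.
 */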
                        const char *data, size_t len)
        if (strcmp(attr->name, "reset") == 0 && len > 0) {
                pd->stats.pkt_started = 0;
                pd->stats.pkt_ended = 0;
                pd->stats.secs_w = 0;
                pd->stats.secs_rg = 0;
                pd->stats.secs_r = 0;
        } else if (strcmp(attr->name, "congestion_off") == 0
                   && sscanf(data, "%d", &val) == 1) {
                spin_lock(&pd->lock);
                spin_unlock(&pd->lock);
        } else if (strcmp(attr->name, "congestion_on") == 0
                   && sscanf(data, "%d", &val) == 1) {
                spin_lock(&pd->lock);
                spin_unlock(&pd->lock);
static const struct sysfs_ops kobj_pkt_ops = {
        .show = kobj_pkt_show,
        .store = kobj_pkt_store
};

static struct kobj_type kobj_pkt_type_stat = {
        .release = pkt_kobj_release,
        .sysfs_ops = &kobj_pkt_ops,
        .default_attrs = kobj_pkt_attrs_stat
};

static struct kobj_type kobj_pkt_type_wqueue = {
        .release = pkt_kobj_release,
        .sysfs_ops = &kobj_pkt_ops,
        .default_attrs = kobj_pkt_attrs_wqueue
};
        pd->kobj_stat = pkt_kobj_create(pd, "stat",
                                        &kobj_pkt_type_stat);
        pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
                                          &kobj_pkt_type_wqueue);
static void class_pktcdvd_release(struct class *cls)
static ssize_t class_pktcdvd_show_map(struct class *c,

        n += sprintf(data+n, "%s %u:%u %u:%u\n",
static ssize_t class_pktcdvd_store_add(struct class *c,

        if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
                pkt_setup_dev(MKDEV(major, minor), NULL);
static ssize_t class_pktcdvd_store_remove(struct class *c,

        unsigned int major, minor;
        if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
                pkt_remove_dev(MKDEV(major, minor));
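/*
 * Class-level attribute files under /sys/class/pktcdvd/: writing a
 * "major:minor" pair maps or unmaps a CD device, and reading
 * "device_map" lists the current pktcdvd-to-drive pairings.
 */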
        __ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
        __ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
static int pkt_sysfs_init(void)

        class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);

                kfree(class_pktcdvd);
                class_pktcdvd = NULL;
static void pkt_sysfs_cleanup(void)

        class_pktcdvd = NULL;
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)

        return pkt_seq_show(m, p);
        .open = pkt_debugfs_fops_open,

        if (!pkt_debugfs_root)

        if (!pkt_debugfs_root)
static void pkt_debugfs_init(void)

        if (IS_ERR(pkt_debugfs_root)) {
                pkt_debugfs_root = NULL;

static void pkt_debugfs_cleanup(void)

        if (!pkt_debugfs_root)

        pkt_debugfs_root = NULL;
        for (i = 0; i < frames; i++) {

        for (i = 0; i < frames; i++) {
                struct bio *bio = pkt->r_bios[i];
static void pkt_free_packet_data(struct packet_data *pkt)

        for (i = 0; i < pkt->frames; i++) {
                struct bio *bio = pkt->r_bios[i];
        BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

                pkt_free_packet_data(pkt);

        INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
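/*
 * pd->settings.size is the zone size in 512-byte sectors, so
 * size >> 2 below is the number of 2048-byte CD frames each
 * preallocated packet_data must be able to hold.
 */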
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)

        while (nr_packets > 0) {
                pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
                        pkt_shrink_pktlist(pd);
                pkt->id = nr_packets;
                list_add(&pkt->list, &pd->cdrw.pkt_free_list);
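/*
 * Queued write bios are kept in a per-device rbtree sorted by start
 * sector (see pkt_rbtree_insert() below), which lets pkt_handle_queue()
 * collect every bio that falls into the same zone in one pass.
 */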
                if (s > tmp->bio->bi_sector) {
                        tmp = pkt_rbtree_next(tmp);

        rb_link_node(&node->rb_node, parent, p);
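/*
 * pkt_generic_packet() issues MMC commands to the drive as SCSI
 * pass-through (REQ_TYPE_BLOCK_PC) requests on the underlying queue.
 */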
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        static char *info[9] = { "No sense", "Recovered error", "Not ready",
                                 "Medium error", "Hardware error",
                                 "Illegal request", "Unit attention",
                                 "Data protect", "Blank check" };
        printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

        if (sense->sense_key > 8) {

        printk(" (%s)\n", info[sense->sense_key]);

        return pkt_generic_packet(pd, &cgc);
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
                                unsigned write_speed, unsigned read_speed)

        cgc.cmd[2] = (read_speed >> 8) & 0xff;
        cgc.cmd[3] = read_speed & 0xff;
        cgc.cmd[4] = (write_speed >> 8) & 0xff;
        cgc.cmd[5] = write_speed & 0xff;

        if ((ret = pkt_generic_packet(pd, &cgc)))
                pkt_dump_sense(&cgc);
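/*
 * Both speeds are encoded big-endian, in kB/s, into the SET SPEED CDB:
 * bytes 2-3 carry the read speed and bytes 4-5 the write speed; any
 * error is reported through the sense data dumped above.
 */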
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)

        if (bio_data_dir(bio) == READ)
                bio_list_add(&pd->iosched.read_queue, bio);
        else
                bio_list_add(&pd->iosched.write_queue, bio);
        spin_unlock(&pd->iosched.lock);
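/*
 * Reads and writes are queued separately so the scheduler below can
 * batch one direction at a time; iosched.last_write and
 * iosched.successive_reads track when switching direction would force
 * a seek.
 */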
        int reads_queued, writes_queued;

        reads_queued = !bio_list_empty(&pd->iosched.read_queue);
        writes_queued = !bio_list_empty(&pd->iosched.write_queue);
        spin_unlock(&pd->iosched.lock);

        if (!reads_queued && !writes_queued)

                int need_write_seek = 1;
                bio = bio_list_peek(&pd->iosched.write_queue);
                spin_unlock(&pd->iosched.lock);
                if (bio && (bio->bi_sector == pd->iosched.last_write))
                if (need_write_seek && reads_queued) {

                if (!reads_queued && writes_queued) {

                bio = bio_list_pop(&pd->iosched.write_queue);
                bio = bio_list_pop(&pd->iosched.read_queue);
                spin_unlock(&pd->iosched.lock);
        if (bio_data_dir(bio) == READ)
                pd->iosched.successive_reads += bio->bi_size >> 10;
        else {
                pd->iosched.successive_reads = 0;
                pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
        }
                    <= queue_max_segments(q)) {

                    <= queue_max_segments(q)) {
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs,
                              struct page *dst_page, int dst_offs)

        while (copy_size > 0) {
                struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
                        src_bvl->bv_offset + offs;
                int len = min_t(int, copy_size, src_bvl->bv_len - offs);
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)

        for (f = 0; f < pkt->frames; f++) {
                if (bvec[f].bv_page != pkt->pages[p]) {
                        void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
                        bvec[f].bv_page = pkt->pages[p];
                        bvec[f].bv_offset = offs;
                        BUG_ON(bvec[f].bv_offset != offs);
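/*
 * Any frame whose data does not already live in the packet's own pages
 * is copied there, so the write bio ends up with one self-contained
 * buffer independent of the originating bios.
 */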
static void pkt_end_io_read(struct bio *bio, int err)

        VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
                (unsigned long long)pkt->sector,
                (unsigned long long)bio->bi_sector, err);

        pkt_bio_finished(pd);
static void pkt_end_io_packet_write(struct bio *bio, int err)

        VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

        pd->stats.pkt_ended++;

        pkt_bio_finished(pd);
        int frames_read = 0;

        memset(written, 0, sizeof(written));
        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {
                for (f = first_frame; f < first_frame + num_frames; f++)
        spin_unlock(&pkt->lock);

        VPRINTK("pkt_gather_data: zone %llx cached\n",
                (unsigned long long)pkt->sector);

        for (f = 0; f < pkt->frames; f++) {
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;

                VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
                        f, pkt->pages[p], offset);

                pkt_queue_bio(pd, bio);

        VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
                frames_read, (unsigned long long)pkt->sector);
        pd->stats.pkt_started++;
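/*
 * Read-gathering: frames of the zone not covered by the queued write
 * bios are read from the disc first, so the packet write can always
 * emit a complete zone.
 */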
                if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
                        list_del_init(&pkt->list);

        list_add(&pkt->list, &pd->cdrw.pkt_free_list);
static int pkt_start_recovery(struct packet_data *pkt)

        if (!sb->s_op->relocate_blocks)

        if (sb->s_op->relocate_blocks(sb, old_block, &new_block))

        pkt->sector = new_sector;

        pkt->bio->bi_sector = new_sector;
        pkt->bio->bi_next = NULL;
        pkt->bio->bi_flags = 1 << BIO_UPTODATE;
        pkt->bio->bi_idx = 0;

        BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
        BUG_ON(pkt->bio->bi_private != pkt);
#if PACKET_DEBUG > 1
        static const char *state_name[] = {
                "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"

        VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id,
                (unsigned long long)pkt->sector,
                state_name[old_state], state_name[state]);
        struct bio *bio = NULL;

        if (list_empty(&pd->cdrw.pkt_free_list)) {
                VPRINTK("handle_queue: no pkt\n");

        spin_lock(&pd->lock);

                zone = ZONE(bio->bi_sector, pd);
                node = pkt_rbtree_next(node);
                if (node == first_node)

        spin_unlock(&pd->lock);

                VPRINTK("handle_queue: no bio\n");
        pkt = pkt_get_packet_data(pd, zone);

        spin_lock(&pd->lock);
        VPRINTK("pkt_handle_queue: looking for zone %llx\n",
                (unsigned long long)zone);
        while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
                VPRINTK("pkt_handle_queue: found zone=%llx\n",
                        (unsigned long long)ZONE(bio->bi_sector, pd));
                if (ZONE(bio->bi_sector, pd) != zone)
                pkt_rbtree_erase(pd, node);
                spin_lock(&pkt->lock);
                spin_unlock(&pkt->lock);

        spin_unlock(&pd->lock);

        spin_lock(&pd->cdrw.active_list_lock);
        list_add(&pkt->list, &pd->cdrw.pkt_active_list);
        spin_unlock(&pd->cdrw.active_list_lock);
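/*
 * Every rbtree node whose bio maps to the chosen zone is moved onto
 * this packet, which is then placed on the active list for the state
 * machine to process.
 */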
        struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

        for (f = 0; f < pkt->frames; f++) {

        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {

                for (f = first_frame; f < first_frame + num_frames; f++) {
                        struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

                        while (src_offs >= src_bvl->bv_len) {
                                src_offs -= src_bvl->bv_len;
                                BUG_ON(segment >= bio->bi_vcnt);
                                src_bvl = bio_iovec_idx(bio, segment);

                        bvec[f].bv_page = src_bvl->bv_page;
                        bvec[f].bv_offset = src_bvl->bv_offset + src_offs;

                        pkt_copy_bio_data(bio, segment, src_offs,
                                          bvec[f].bv_page, bvec[f].bv_offset);

        spin_unlock(&pkt->lock);

        VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
                frames_write, (unsigned long long)pkt->sector);

        pkt_make_local_copy(pkt, bvec);

        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
        for (f = 0; f < pkt->frames; f++)

        pkt_queue_bio(pd, pkt->w_bio);
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)

        while ((bio = bio_list_pop(&pkt->orig_bios)))
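/*
 * Packet state machine (cf. state_name[] above): WAITING -> READ_WAIT
 * (read-gathering) -> WRITE_WAIT -> FINISHED, with RECOVERY taken on a
 * failed write when the filesystem can relocate the block.
 */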
        VPRINTK("run_state_machine: pkt %d\n", pkt->id);

                switch (pkt->state) {
                        pkt_gather_data(pd, pkt);
                        pkt_start_write(pd, pkt);
                        if (pkt_start_recovery(pkt)) {
                                pkt_start_write(pd, pkt);
                                VPRINTK("No recovery possible\n");
                        pkt_finish_packet(pkt, uptodate);

        VPRINTK("pkt_handle_packets\n");

                pkt_run_state_machine(pd, pkt);
        spin_lock(&pd->cdrw.active_list_lock);
                        pkt_put_packet_data(pd, pkt);
        spin_unlock(&pd->cdrw.active_list_lock);
static void pkt_count_states(struct pktcdvd_device *pd, int *states)

        spin_lock(&pd->cdrw.active_list_lock);
                states[pkt->state]++;
        spin_unlock(&pd->cdrw.active_list_lock);
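/*
 * kcdrwd is the per-device worker thread: it moves queued bios into
 * packets (pkt_handle_queue), drives each packet's state machine
 * (pkt_handle_packets), then lets the I/O scheduler submit the
 * resulting bios (pkt_iosched_process_queue).
 */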
static int kcdrwd(void *foobar)

        long min_sleep_time, residue;

                        pkt_count_states(pd, states);
                        VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
                                states[0], states[1], states[2], states[3],
                                states[4], states[5]);

                        VPRINTK("kcdrwd: sleeping\n");

                while (pkt_handle_queue(pd))

                pkt_handle_packets(pd);

                pkt_iosched_process_queue(pd);
        printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
        cgc->cmd[2] = page_code | (page_control << 6);

        return pkt_generic_packet(pd, cgc);

        return pkt_generic_packet(pd, cgc);

        if ((ret = pkt_generic_packet(pd, &cgc)))

        return pkt_generic_packet(pd, &cgc);

        cgc.cmd[1] = type & 3;
        cgc.cmd[4] = (track & 0xff00) >> 8;
        cgc.cmd[5] = track & 0xff;

        if ((ret = pkt_generic_packet(pd, &cgc)))

        return pkt_generic_packet(pd, &cgc);
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,

        if ((ret = pkt_get_disc_info(pd, &di)))

        if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))

        if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)

        write_param_page *wp;

        memset(buffer, 0, sizeof(buffer));

                pkt_dump_sense(&cgc);

        size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
        pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
        if (size > sizeof(buffer))
                size = sizeof(buffer);

                pkt_dump_sense(&cgc);

        wp->track_mode = pd->settings.track_mode;
        wp->write_type = pd->settings.write_type;
        wp->data_block_type = pd->settings.block_mode;

        wp->multi_session = 0;

#ifdef PACKET_USE_LS

                wp->session_format = 0;

                wp->session_format = 0x20;

        if ((ret = pkt_mode_select(pd, &cgc))) {
                pkt_dump_sense(&cgc);

        pkt_print_settings(pd);
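/*
 * The drive's write parameters mode page is fetched with MODE SENSE,
 * patched with the chosen packet settings (write type, track mode,
 * data block type, session format) and sent back with MODE SELECT
 * before the result is printed.
 */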
        if (!ti->packet || !ti->fp)

        if (ti->rt == 0 && ti->blank == 0)

        if (ti->rt == 0 && ti->blank == 1)

        if (ti->rt == 1 && ti->blank == 0)

        if (di->erasable == 0) {
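/*
 * Only fixed-packet tracks are usable; the rt/blank combinations then
 * appear to distinguish a usable data track, a blank track, and an
 * unusable (reserved) one, while the erasable check presumably rejects
 * non-rewritable discs on this code path.
 */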
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)

        unsigned char buf[12];

        ret = pkt_generic_packet(pd, &cgc);
        pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

        if ((ret = pkt_get_disc_info(pd, &di))) {
                printk("failed get_disc\n");

        if (!pkt_writable_disc(pd, &di))

        if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {

        if (!pkt_writable_track(pd, &ti)) {

        pd->lra = 0xffffffff;

        pd->settings.track_mode = ti.track_mode;

        switch (ti.data_mode) {
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,

        unsigned char buf[64];

        cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
        ret = pkt_mode_select(pd, &cgc);

                pkt_dump_sense(&cgc);
        } else if (!ret && set)
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)

        cgc.cmd[4] = lockflag ? 1 : 0;
        return pkt_generic_packet(pd, &cgc);
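/*
 * This presumably issues PREVENT/ALLOW MEDIUM REMOVAL: byte 4 bit 0 of
 * the CDB is the prevent flag, so lockflag != 0 locks the tray and 0
 * unlocks it.
 */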
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
                                                unsigned *write_speed)

        unsigned char buf[256+18];
        unsigned char *cap_buf;

                pkt_dump_sense(&cgc);

        if (cap_buf[1] >= 28)

        if (cap_buf[1] >= 30) {

                int num_spdb = (cap_buf[30] << 8) + cap_buf[31];

        *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
static char clv_to_speed[16] = {
        0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

static char hs_clv_to_speed[16] = {
        0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

static char us_clv_to_speed[16] = {
        0, 2, 4, 8, 0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0
};
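/*
 * These tables map the 4-bit CLV speed code from the disc information
 * to nominal X-rate multiples; zero entries are codes with no defined
 * speed. Standard, High-Speed and Ultra-Speed CD-RW media each get
 * their own table.
 */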
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,

        unsigned char buf[64];

        ret = pkt_generic_packet(pd, &cgc);
                pkt_dump_sense(&cgc);

        size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
        if (size > sizeof(buf))

        ret = pkt_generic_packet(pd, &cgc);
                pkt_dump_sense(&cgc);

        if (!(buf[6] & 0x40)) {

        if (!(buf[6] & 0x4)) {

        st = (buf[6] >> 3) & 0x7;

                *speed = clv_to_speed[sp];

                *speed = hs_clv_to_speed[sp];

                *speed = us_clv_to_speed[sp];
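/* OPC: have the drive run its Optimum Power Calibration before writing. */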
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)

        if ((ret = pkt_generic_packet(pd, &cgc)))
                pkt_dump_sense(&cgc);
        unsigned int write_speed, media_write_speed, read_speed;

        if ((ret = pkt_probe_settings(pd))) {

        if ((ret = pkt_set_write_settings(pd))) {

        if ((ret = pkt_get_max_speed(pd, &write_speed)))
                write_speed = 16 * 177;

        if ((ret = pkt_media_speed(pd, &media_write_speed)))
                media_write_speed = 16;
        write_speed = min(write_speed, media_write_speed * 177);
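        /* 1x CD data rate is 176.4 kB/s, rounded to 177 when converting X-rates. */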
        read_speed = write_speed;

        if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {

        if ((ret = pkt_perform_opc(pd))) {

        if ((ret = pkt_get_last_written(pd, &lba))) {

        set_capacity(pd->disk, lba << 2);
        set_capacity(pd->bdev->bd_disk, lba << 2);
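        /* lba counts 2048-byte frames, gendisk capacity 512-byte sectors, hence << 2. */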
        q = bdev_get_queue(pd->bdev);

                if ((ret = pkt_open_write(pd)))

                spin_lock_irq(q->queue_lock);
                spin_unlock_irq(q->queue_lock);

                if ((ret = pkt_set_segment_merging(pd, q)))

                if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)

        if (flush && pkt_flush_cache(pd))

        pkt_lock_door(pd, 0);

        pkt_shrink_pktlist(pd);
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)

        if (dev_minor >= MAX_WRITERS)
        return pkt_devs[dev_minor];
        pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));

                ret = pkt_open_dev(pd, mode & FMODE_WRITE);
static int pkt_close(struct gendisk *disk, fmode_t mode)

        pkt_release_dev(pd, flush);
static void pkt_end_io_read_cloned(struct bio *bio, int err)

        pkt_bio_finished(pd);
static void pkt_make_request(struct request_queue *q, struct bio *bio)

        int was_empty, blocked_bio;

        if (bio_data_dir(bio) == READ) {
                struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);

                cloned_bio->bi_bdev = pd->bdev;
                cloned_bio->bi_private = psd;
                cloned_bio->bi_end_io = pkt_end_io_read_cloned;
                pd->stats.secs_r += bio->bi_size >> 9;
                pkt_queue_bio(pd, cloned_bio);

                        pd->name, (unsigned long long)bio->bi_sector);

        zone = ZONE(bio->bi_sector, pd);
        VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
                (unsigned long long)bio->bi_sector,
                (unsigned long long)(bio->bi_sector + bio_sectors(bio)));

                struct bio_pair *bp;

                last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
                if (last_zone != zone) {

                        first_sectors = last_zone - bio->bi_sector;

                        pkt_make_request(q, &bp->bio1);
                        pkt_make_request(q, &bp->bio2);
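/*
 * A write spanning two zones is split with bio_pair so that each half
 * lies entirely in a single zone, then both halves are fed back
 * through pkt_make_request().
 */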
        spin_lock(&pd->cdrw.active_list_lock);

                if (pkt->sector == zone) {
                        spin_lock(&pkt->lock);

                        spin_unlock(&pkt->lock);
                        spin_unlock(&pd->cdrw.active_list_lock);

                        spin_unlock(&pkt->lock);

        spin_unlock(&pd->cdrw.active_list_lock);

        spin_lock(&pd->lock);
        spin_unlock(&pd->lock);

        spin_lock(&pd->lock);
        spin_unlock(&pd->lock);

        spin_lock(&pd->lock);
        pkt_rbtree_insert(pd, node);
        spin_unlock(&pd->lock);

        } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
                          struct bio_vec *bvec)

        int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
        int remaining = (pd->settings.size << 9) - used;

        remaining = max(remaining, remaining2);
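/*
 * The merge callback caps a growing bio at its zone boundary: "used"
 * is how many bytes of the zone the bio already covers, and only the
 * remainder may be merged in.
 */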
static int pkt_seq_show(struct seq_file *m, void *p)

        pkt_count_states(pd, states);
        seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
                   states[0], states[1], states[2], states[3],
                   states[4], states[5]);
        seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",

        return single_open(file, pkt_seq_show, PDE(inode)->data);

        .open = pkt_seq_open,
                if (pd2->bdev->bd_dev == dev) {

        if (IS_ERR(pd->cdrw.thread)) {
        VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,

                pkt_lock_door(pd, 0);
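/*
 * Media-change polling on the pktcdvd gendisk is forwarded verbatim to
 * the underlying drive's own check_events handler.
 */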
static unsigned int pkt_check_events(struct gendisk *disk,
                                     unsigned int clearing)

        struct gendisk *attached_disk;

        attached_disk = pd->bdev->bd_disk;
        if (!attached_disk || !attached_disk->fops->check_events)
        return attached_disk->fops->check_events(attached_disk, clearing);

static const struct block_device_operations pktcdvd_ops = {
        .release = pkt_close,
        .check_events = pkt_check_events,
static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)

        struct gendisk *disk;

        if (idx == MAX_WRITERS) {

        INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
        INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);

        bio_list_init(&pd->iosched.read_queue);
        bio_list_init(&pd->iosched.write_queue);

        disk->major = pktdev_major;
        disk->first_minor = idx;
        disk->fops = &pktcdvd_ops;
        disk->flags = GENHD_FL_REMOVABLE;
        disk->devnode = pktcdvd_devnode;
        disk->private_data = pd;

        ret = pkt_new_dev(pd, dev);

        disk->events = pd->bdev->bd_disk->events;
        disk->async_events = pd->bdev->bd_disk->async_events;

        pkt_sysfs_dev_new(pd);
        pkt_debugfs_dev_new(pd);
static int pkt_remove_dev(dev_t pkt_dev)

                if (pd && (pd->pkt_dev == pkt_dev))
        if (idx == MAX_WRITERS) {

        if (!IS_ERR(pd->cdrw.thread))

        pkt_debugfs_dev_remove(pd);
        pkt_sysfs_dev_remove(pd);
        pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);

        ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

                ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
                ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);

                ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));

                pkt_get_status(&ctrl_cmd);
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)

        return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));

        .unlocked_ioctl = pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = pkt_ctl_compat_ioctl,
        .nodename = "pktcdvd/control",
        .fops = &pkt_ctl_fops
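/*
 * Besides the sysfs class files, a misc device node at
 * /dev/pktcdvd/control accepts the setup/teardown/status ioctls
 * handled by pkt_ctl_ioctl() above.
 */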
static int __init pkt_init(void)

        ret = pkt_sysfs_init();

        pkt_debugfs_cleanup();
        pkt_sysfs_cleanup();
static void __exit pkt_exit(void)

        pkt_debugfs_cleanup();
        pkt_sysfs_cleanup();