#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/types.h>

#define TAPE_DBF_AREA tape_core_dbf
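/* Delay before retrying after a drive has reported a long-busy condition
 * (presumably in seconds, since it is used to arm the per-device lb_timeout
 * timer). */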
#define LONG_BUSY_TIMEOUT 180
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);
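	/* A ccw device id is folded into a single integer: the subchannel-set
	 * id is shifted above the 16-bit device number, giving a unique value
	 * per device. */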
	return dev_id->devno + (dev_id->ssid << 16);
static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,

	if (newstate < TS_SIZE && newstate >= 0)
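	/* Environment strings handed to the medium-state change uevent so that
	 * user space (e.g. udev) can see the new state. */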
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";

		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;

		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
	if (oldstate == newstate)

	for (retries = 0; retries < 5; retries++) {
	list_del_init(&device->node);

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
	device->lb_timeout.function = tape_long_busy_timeout;

	if (!try_module_get(discipline->owner)) {

	rc = tape_assign_minor(device);

	tape_remove_minor(device);

	module_put(discipline->owner);

	tape_remove_minor(device);
	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",

		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));

		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",

		tape_cleanup_device(device);

		DBF_EVENT(3, "(%08x): Set offline failed "
tape_alloc_device(void)

	if (device == NULL) {

	INIT_LIST_HEAD(&device->node);

	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);

	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);

		device = ERR_PTR(-ENODEV);

	device = tape_alloc_device();

	device->cdev_id = devid_to_int(&dev_id);
__tape_discard_requests(struct tape_device *device)

	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

		tape_cleanup_device(device);

		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",

		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));

		__tape_discard_requests(device);

		tape_cleanup_device(device);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	if (request == NULL) {
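	/* Allocate the channel program for this request: an array of cplength
	 * channel command words (CCWs). */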
	request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),

	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,

	DBF_LH(6, "Free request %p\n", request);

		(unsigned long) request,

	} else if (rc == -EBUSY) {
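		/* The common I/O layer reports the device as busy; the request
		 * presumably stays queued and is retried later. */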
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);

__tape_start_next_request(struct tape_device *device)

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);

		rc = __tape_cancel_io(device, request);

		rc = __tape_start_io(device, request);

tape_delayed_next_request(struct work_struct *work)

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);

	__tape_start_next_request(device);
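/* Timer callback armed when a drive reports long-busy: once the timeout
 * expires, processing of the request queue is kicked off again. */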
static void tape_long_busy_timeout(unsigned long data)

	__tape_start_next_request(device);

	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);

		__tape_start_next_request(device);
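	/* Dump the device and subchannel status together with the 32 bytes of
	 * sense data in the irb's ECW area (eight 32-bit words) for debugging. */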
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",

	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);

	switch (request->op) {

		rc = __tape_start_io(device, request);

		DBF_LH(5, "Request %p added for execution.\n", request);

		DBF_LH(5, "Request %p add to queue.\n", request);

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	rc = __tape_start_request(device, request);
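/* Completion callback for the synchronous I/O paths: it is installed on the
 * request before it is started and presumably wakes the task sleeping in
 * tape_do_io (or, for the interruptible variant below, in
 * tape_do_io_interruptible) once the request finishes. */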
__tape_wake_up(struct tape_request *request, void *data)

	request->callback = __tape_wake_up;

	rc = __tape_start_request(device, request);

__tape_wake_up_interruptible(struct tape_request *request, void *data)

	request->callback = __tape_wake_up_interruptible;

	rc = __tape_start_request(device, request);

		rc = __tape_cancel_io(device, request);

	rc = __tape_cancel_io(device, request);

	if (device == NULL) {

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

		switch (PTR_ERR(irb)) {

			DBF_LH(1, "(%08x): Request timed out\n",

			__tape_end_request(device, request, -EIO);

			DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
			       device->cdev_id, PTR_ERR(irb));

		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&

				__tape_start_next_request(device);
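	/* 0x0c is channel end + device end; any other device status means the
	 * interrupt carries error/sense information that needs handling. */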
	if (irb->scsw.cmd.dstat != 0x0c) {

		DBF_EVENT(3, "-- Tape Interrupthandler --\n");

		DBF_EVENT(6, "tape:device is not operational\n");

			__tape_end_request(device, request, -EIO);

	rc = device->discipline->irq(device, request, irb);
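	/* The discipline's interrupt handler returns a TAPE_IO_* code that
	 * decides what happens next: complete the request, restart it, cancel
	 * the pending I/O, or fail it with -EIO. */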
		__tape_end_request(device, request, rc);

		rc = __tape_start_io(device, request);

			__tape_end_request(device, request, rc);

		rc = __tape_cancel_io(device, request);

			__tape_end_request(device, request, rc);

		__tape_end_request(device, request, -EIO);

		__tape_end_request(device, request, rc);

	    !try_module_get(device->discipline->owner)) {

	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)

			rc = fn(device, mt_count);

		rc = fn(device, mt_count);

#ifdef DBF_LIKE_HELL

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "