10 #define KMSG_COMPONENT "cio"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 #include <linux/module.h>
16 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/list.h>
20 #include <linux/device.h>
27 #include <asm/param.h>
/* Index into recovery_delay[] selecting the current recovery retry pass. */
43 static int recovery_phase;
/*
 * Escalating waits between device-recovery passes, in seconds; call sites
 * multiply by HZ when arming the recovery timer.
 */
44 static const unsigned long recovery_delay[] = { 3, 30, 300 };
62 found = ccw_device_id_match(ids, &cdev->
id);
66 cdev->
id.driver_info = found->driver_info;
74 static int snprint_alias(
char *
buf,
size_t size,
89 len +=
snprintf(buf, size,
"dtdm%s", suffix);
101 char modalias_buf[30];
125 snprint_alias(modalias_buf,
sizeof(modalias_buf),
id,
"");
/*
 * Forward declarations: ccw_bus_type and the io_subchannel_* callbacks are
 * defined later in this file but are referenced earlier (e.g. by the
 * io_subchannel_driver css_driver table).
 */
130 static struct bus_type ccw_bus_type;
132 static void io_subchannel_irq(
struct subchannel *);
133 static int io_subchannel_probe(
struct subchannel *);
134 static int io_subchannel_remove(
struct subchannel *);
135 static void io_subchannel_shutdown(
struct subchannel *);
136 static int io_subchannel_sch_event(
struct subchannel *,
int);
/* Timer callback driving delayed device recovery (uses recovery_delay[]). */
139 static void recovery_func(
unsigned long data);
156 cdev = sch_get_cdev(sch);
157 if (cdev && !device_is_registered(&cdev->
dev))
162 static int io_subchannel_settle(
void)
174 static struct css_driver io_subchannel_driver = {
177 .name =
"io_subchannel",
179 .subchannel_type = io_subchannel_ids,
180 .irq = io_subchannel_irq,
181 .sch_event = io_subchannel_sch_event,
182 .chp_event = io_subchannel_chp_event,
183 .probe = io_subchannel_probe,
184 .remove = io_subchannel_remove,
185 .shutdown = io_subchannel_shutdown,
186 .prepare = io_subchannel_prepare,
187 .settle = io_subchannel_settle,
228 for (chp = 0; chp < 8; chp++) {
233 ret +=
sprintf(buf + ret,
"00 ");
235 ret +=
sprintf (buf+ret,
"\n");
245 return sprintf (buf,
"%02x %02x %02x\n",
256 return sprintf(buf,
"%04x/%02x\n",
268 return sprintf(buf,
"%04x/%02x\n",
279 len = snprint_alias(buf,
PAGE_SIZE,
id,
"\n");
297 static void ccw_device_unregister(
struct ccw_device *cdev)
299 if (device_is_registered(&cdev->
dev)) {
303 if (cdev->
private->flags.initialized) {
304 cdev->
private->flags.initialized = 0;
310 static void io_subchannel_quiesce(
struct subchannel *);
333 if (cdev->
drv->set_offline) {
334 ret = cdev->
drv->set_offline(cdev);
342 while (!dev_fsm_final_state(cdev) &&
344 spin_unlock_irq(cdev->
ccwlock);
354 "0.%x.%04x\n", ret, cdev->
private->dev_id.ssid,
359 spin_unlock_irq(cdev->
ccwlock);
360 io_subchannel_quiesce(sch);
363 }
while (ret == -
EBUSY);
364 spin_unlock_irq(cdev->
ccwlock);
369 pr_warning(
"%s: The device entered boxed state while "
370 "being set offline\n", dev_name(&cdev->
dev));
372 pr_warning(
"%s: The device stopped operating while "
373 "being set offline\n", dev_name(&cdev->
dev));
382 spin_unlock_irq(cdev->
ccwlock);
415 spin_unlock_irq(cdev->
ccwlock);
420 "device 0.%x.%04x\n",
421 ret, cdev->
private->dev_id.ssid,
431 spin_unlock_irq(cdev->
ccwlock);
434 pr_warning(
"%s: Setting the device online failed "
435 "because it is boxed\n",
436 dev_name(&cdev->
dev));
438 pr_warning(
"%s: Setting the device online failed "
439 "because it is not operational\n",
440 dev_name(&cdev->
dev));
446 spin_unlock_irq(cdev->
ccwlock);
447 if (cdev->
drv->set_online)
448 ret = cdev->
drv->set_online(cdev);
457 while (!dev_fsm_final_state(cdev) &&
459 spin_unlock_irq(cdev->
ccwlock);
467 spin_unlock_irq(cdev->
ccwlock);
476 "device 0.%x.%04x\n",
477 ret2, cdev->
private->dev_id.ssid,
480 spin_unlock_irq(cdev->
ccwlock);
486 static int online_store_handle_offline(
struct ccw_device *cdev)
491 spin_unlock_irq(cdev->
ccwlock);
494 if (cdev->
drv && cdev->
drv->set_offline)
499 static int online_store_recog_and_online(
struct ccw_device *cdev)
505 spin_unlock_irq(cdev->
ccwlock);
507 cdev->
private->flags.recog_done);
512 if (cdev->
drv && cdev->
drv->set_online)
517 static int online_store_handle_online(
struct ccw_device *cdev,
int force)
521 ret = online_store_recog_and_online(cdev);
528 if (cdev->
id.cu_type == 0)
530 ret = online_store_recog_and_online(cdev);
538 const char *buf,
size_t count)
548 if (!dev_fsm_final_state(cdev) &&
559 if (cdev->
drv && !try_module_get(cdev->
drv->driver.owner)) {
563 if (!
strncmp(buf,
"force\n", count)) {
575 ret = online_store_handle_offline(cdev);
578 ret = online_store_handle_online(cdev, force);
585 module_put(cdev->
drv->driver.owner);
588 return (ret < 0) ? ret :
count;
598 return sprintf(buf,
"no device\n");
599 switch (cdev->
private->state) {
601 return sprintf(buf,
"boxed\n");
607 return sprintf(buf,
"no path\n");
609 return sprintf(buf,
"no device\n");
618 const char *buf,
size_t count)
625 pr_warning(
"Logging for subchannel 0.%x.%04x failed with "
630 pr_notice(
"Logging for subchannel 0.%x.%04x was triggered\n",
644 static struct attribute *io_subchannel_attrs[] = {
645 &dev_attr_chpids.attr,
646 &dev_attr_pimpampom.attr,
647 &dev_attr_logging.attr,
652 .attrs = io_subchannel_attrs,
655 static struct attribute * ccwdev_attrs[] = {
656 &dev_attr_devtype.attr,
657 &dev_attr_cutype.attr,
658 &dev_attr_modalias.attr,
659 &dev_attr_online.attr,
661 &dev_attr_availability.attr,
666 .attrs = ccwdev_attrs,
676 static int ccw_device_register(
struct ccw_device *cdev)
681 dev->
bus = &ccw_bus_type;
689 static int match_dev_id(
struct device *dev,
void *
data)
694 return ccw_dev_id_is_equal(&cdev->
private->dev_id, dev_id);
717 static void ccw_device_do_unbind_bind(
struct ccw_device *cdev)
721 if (device_is_registered(&cdev->
dev)) {
729 ccw_device_release(
struct device *dev)
757 static int io_subchannel_initialize_dev(
struct subchannel *sch,
763 cdev->
dev.parent = &sch->
dev;
764 cdev->
dev.release = ccw_device_release;
766 cdev->
dev.groups = ccwdev_attr_groups;
774 cdev->
private->flags.initialized = 1;
783 cdev = io_subchannel_allocate_dev(sch);
785 ret = io_subchannel_initialize_dev(sch, cdev);
794 static void sch_create_and_recog_new_device(
struct subchannel *sch)
799 cdev = io_subchannel_create_ccwdev(sch);
806 io_subchannel_recog(cdev, sch);
812 static void io_subchannel_register(
struct ccw_device *cdev)
815 int ret, adjust_init_count = 1;
825 if (!device_is_registered(&sch->
dev))
834 if (device_is_registered(&cdev->
dev)) {
840 " %d for 0.%x.%04x\n", ret,
844 adjust_init_count = 0;
851 dev_set_uevent_suppress(&sch->
dev, 0);
854 ret = ccw_device_register(cdev);
856 CIO_MSG_EVENT(0,
"Could not register ccw dev 0.%x.%04x: %d\n",
858 cdev->
private->dev_id.devno, ret);
860 sch_set_cdev(sch,
NULL);
861 spin_unlock_irqrestore(sch->
lock, flags);
867 cdev->
private->flags.recog_done = 1;
874 static void ccw_device_call_sch_unregister(
struct ccw_device *cdev)
894 cdev->
private->flags.recog_done = 1;
897 switch (cdev->
private->state) {
901 cdev->
private->flags.recog_done = 1;
937 spin_lock_irq(sch->
lock);
938 sch_set_cdev(sch, cdev);
940 spin_unlock_irq(sch->
lock);
943 static int ccw_device_move_to_sch(
struct ccw_device *cdev,
947 int rc, old_enabled = 0;
955 spin_lock_irq(old_sch->
lock);
956 old_enabled = old_sch->
schib.pmcw.ena;
960 spin_unlock_irq(old_sch->
lock);
975 sch->
schib.pmcw.dev, rc);
978 spin_lock_irq(old_sch->
lock);
980 spin_unlock_irq(old_sch->
lock);
988 spin_lock_irq(old_sch->
lock);
989 sch_set_cdev(old_sch,
NULL);
990 spin_unlock_irq(old_sch->
lock);
996 spin_lock_irq(sch->
lock);
1000 sch_set_cdev(sch, cdev);
1001 spin_unlock_irq(sch->
lock);
1007 static int ccw_device_move_to_orph(
struct ccw_device *cdev)
1015 static void io_subchannel_irq(
struct subchannel *sch)
1019 cdev = sch_get_cdev(sch);
1035 static void io_subchannel_init_fields(
struct subchannel *sch)
1044 CIO_MSG_EVENT(6,
"Detected device %04x on subchannel 0.%x.%04X"
1045 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1057 static int io_subchannel_probe(
struct subchannel *sch)
1065 &io_subchannel_attr_group);
1068 "attributes for subchannel "
1069 "0.%x.%04x (rc=%d)\n",
1076 dev_set_uevent_suppress(&sch->
dev, 0);
1078 cdev = sch_get_cdev(sch);
1079 cdev->
dev.groups = ccwdev_attr_groups;
1081 cdev->
private->flags.initialized = 1;
1082 ccw_device_register(cdev);
1094 io_subchannel_init_fields(sch);
1099 &io_subchannel_attr_group);
1112 spin_lock_irq(sch->
lock);
1114 spin_unlock_irq(sch->
lock);
1119 io_subchannel_remove (
struct subchannel *sch)
1124 cdev = sch_get_cdev(sch);
1127 io_subchannel_quiesce(sch);
1130 sch_set_cdev(sch,
NULL);
1133 spin_unlock_irq(cdev->
ccwlock);
1134 ccw_device_unregister(cdev);
1141 static void io_subchannel_verify(
struct subchannel *sch)
1145 cdev = sch_get_cdev(sch);
1150 static void io_subchannel_terminate_path(
struct subchannel *sch,
u8 mask)
1154 cdev = sch_get_cdev(sch);
1160 if (scsw_actl(&sch->
schib.scsw) == 0 || sch->
schib.pmcw.lpum != mask)
1177 static int io_subchannel_chp_event(
struct subchannel *sch,
1192 io_subchannel_terminate_path(sch, mask);
1199 io_subchannel_verify(sch);
1206 io_subchannel_terminate_path(sch, mask);
1211 sch->
lpm |= mask & sch->
opm;
1214 io_subchannel_verify(sch);
1220 static void io_subchannel_quiesce(
struct subchannel *sch)
1225 spin_lock_irq(sch->
lock);
1226 cdev = sch_get_cdev(sch);
1229 if (!sch->
schib.pmcw.ena)
1236 while (ret == -
EBUSY) {
1240 if (ret == -
EBUSY) {
1242 spin_unlock_irq(sch->
lock);
1245 spin_lock_irq(sch->
lock);
1250 spin_unlock_irq(sch->
lock);
1253 static void io_subchannel_shutdown(
struct subchannel *sch)
1255 io_subchannel_quiesce(sch);
1258 static int device_is_disconnected(
struct ccw_device *cdev)
1266 static int recovery_check(
struct device *dev,
void *
data)
1272 switch (cdev->
private->state) {
1284 spin_unlock_irq(cdev->
ccwlock);
1295 spin_lock_irq(&recovery_lock);
1296 if (!timer_pending(&recovery_timer)) {
1297 if (recovery_phase <
ARRAY_SIZE(recovery_delay) - 1)
1300 recovery_delay[recovery_phase] *
HZ);
1302 spin_unlock_irq(&recovery_lock);
1307 static DECLARE_WORK(recovery_work, recovery_work_func);
1309 static void recovery_func(
unsigned long data)
1318 static void ccw_device_schedule_recovery(
void)
1320 unsigned long flags;
1324 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1326 mod_timer(&recovery_timer, jiffies + recovery_delay[0] *
HZ);
1328 spin_unlock_irqrestore(&recovery_lock, flags);
1331 static int purge_fn(
struct device *dev,
void *data)
1345 spin_unlock_irq(cdev->
ccwlock);
1370 cdev->
private->flags.fake_irb = 0;
1373 ccw_device_schedule_recovery();
1403 cdev = sch_get_cdev(sch);
1415 if (sch->
schib.pmcw.dev != cdev->
private->dev_id.devno) {
1420 if ((sch->
schib.pmcw.pam & sch->
opm) == 0) {
1425 if (device_is_disconnected(cdev))
1444 static int io_subchannel_sch_event(
struct subchannel *sch,
int process)
1446 unsigned long flags;
1453 if (!device_is_registered(&sch->
dev))
1457 cdev = sch_get_cdev(sch);
1460 action = sch_get_action(sch);
1461 CIO_MSG_EVENT(2,
"event: sch 0.%x.%04x, process=%d, action=%d\n",
1473 io_subchannel_verify(sch);
1504 spin_unlock_irqrestore(sch->
lock, flags);
1513 rc = ccw_device_move_to_orph(cdev);
1519 if (cdev->
private->flags.resuming) {
1524 sch_set_cdev(sch,
NULL);
1525 spin_unlock_irqrestore(sch->
lock, flags);
1527 ccw_device_unregister(cdev);
1536 if (!cdev || !cdev->
private->flags.resuming)
1546 sch_create_and_recog_new_device(sch);
1549 rc = ccw_device_move_to_sch(cdev, sch);
1557 spin_unlock_irqrestore(sch->
lock, flags);
1567 spin_unlock_irqrestore(sch->
lock, flags);
1572 #ifdef CONFIG_CCW_CONSOLE
/*
 * Non-zero while the single console ccw_device slot is claimed; claimed
 * atomically via xchg() and cleared again on probe/enable failure.
 */
1575 static int console_cdev_in_use;
1581 return &ccw_console_lock;
1584 static int ccw_device_console_enable(
struct ccw_device *cdev,
1591 memset(io_priv, 0,
sizeof(*io_priv));
1593 io_subchannel_init_fields(sch);
1597 sch->
driver = &io_subchannel_driver;
1599 cdev->
dev.parent= &sch->
dev;
1600 sch_set_cdev(sch, cdev);
1601 io_subchannel_recog(cdev, sch);
1604 while (!dev_fsm_final_state(cdev))
1610 while (!dev_fsm_final_state(cdev))
1616 spin_unlock_irq(cdev->
ccwlock);
1626 if (
xchg(&console_cdev_in_use, 1) != 0)
1627 return ERR_PTR(-
EBUSY);
1628 sch = cio_probe_console();
1630 console_cdev_in_use = 0;
1631 return (
void *)
sch;
1635 console_cdev.private = &console_private;
1636 console_private.cdev = &console_cdev;
1638 ret = ccw_device_console_enable(&console_cdev, sch);
1640 cio_release_console();
1641 console_cdev_in_use = 0;
1642 return ERR_PTR(ret);
1644 console_cdev.online = 1;
1645 return &console_cdev;
1648 static int ccw_device_pm_restore(
struct device *dev);
1652 if (!console_cdev_in_use)
1654 return ccw_device_pm_restore(&console_cdev.dev);
1663 __ccwdev_check_busid(
struct device *dev,
void *
id)
1669 return (
strcmp(bus_id, dev_name(dev)) == 0);
1690 __ccwdev_check_busid);
1706 ccw_device_probe (
struct device *dev)
1732 ccw_device_remove (
struct device *dev)
1744 spin_unlock_irq(cdev->
ccwlock);
1747 dev_fsm_final_state(cdev));
1750 "device 0.%x.%04x\n",
1751 ret, cdev->
private->dev_id.ssid,
1762 static void ccw_device_shutdown(
struct device *dev)
1767 if (cdev->
drv && cdev->
drv->shutdown)
1768 cdev->
drv->shutdown(cdev);
1772 static int ccw_device_pm_prepare(
struct device *dev)
1783 return cdev->
drv->prepare(cdev);
1788 static void ccw_device_pm_complete(
struct device *dev)
1793 cdev->
drv->complete(cdev);
1796 static int ccw_device_pm_freeze(
struct device *dev)
1800 int ret, cm_enabled;
1803 if (!dev_fsm_final_state(cdev))
1807 if (cdev->
drv && cdev->
drv->freeze) {
1808 ret = cdev->
drv->freeze(cdev);
1813 spin_lock_irq(sch->
lock);
1815 spin_unlock_irq(sch->
lock);
1823 spin_lock_irq(sch->
lock);
1825 spin_unlock_irq(sch->
lock);
1830 static int ccw_device_pm_thaw(
struct device *dev)
1834 int ret, cm_enabled;
1839 spin_lock_irq(sch->
lock);
1843 spin_unlock_irq(sch->
lock);
1853 if (cdev->
drv && cdev->
drv->thaw)
1854 ret = cdev->
drv->thaw(cdev);
1859 static void __ccw_device_pm_restore(
struct ccw_device *cdev)
1863 spin_lock_irq(sch->
lock);
1872 cdev->
private->flags.resuming = 1;
1875 spin_unlock_irq(sch->
lock);
1880 spin_lock_irq(sch->
lock);
1886 spin_unlock_irq(sch->
lock);
1889 spin_lock_irq(sch->
lock);
1892 cdev->
private->flags.resuming = 0;
1893 spin_unlock_irq(sch->
lock);
1896 static int resume_handle_boxed(
struct ccw_device *cdev)
1905 static int resume_handle_disc(
struct ccw_device *cdev)
1914 static int ccw_device_pm_restore(
struct device *dev)
1920 __ccw_device_pm_restore(cdev);
1922 spin_lock_irq(sch->
lock);
1927 switch (cdev->
private->state) {
1930 cdev->
private->flags.donotify = 0;
1933 ret = resume_handle_boxed(cdev);
1938 ret = resume_handle_disc(cdev);
1954 ret = resume_handle_disc(cdev);
1959 spin_unlock_irq(sch->
lock);
1961 spin_lock_irq(sch->
lock);
1971 spin_unlock_irq(sch->
lock);
1973 spin_lock_irq(sch->
lock);
1976 "(rc=%d)\n", cdev->
private->dev_id.ssid,
1977 cdev->
private->dev_id.devno, ret);
1983 spin_unlock_irq(sch->
lock);
1985 ret = cdev->
drv->restore(cdev);
1989 spin_unlock_irq(sch->
lock);
1993 static const struct dev_pm_ops ccw_pm_ops = {
1994 .prepare = ccw_device_pm_prepare,
1995 .complete = ccw_device_pm_complete,
1996 .freeze = ccw_device_pm_freeze,
1997 .thaw = ccw_device_pm_thaw,
1998 .restore = ccw_device_pm_restore,
2001 static struct bus_type ccw_bus_type = {
2003 .match = ccw_bus_match,
2004 .uevent = ccw_uevent,
2005 .probe = ccw_device_probe,
2006 .remove = ccw_device_remove,
2007 .shutdown = ccw_device_shutdown,
2023 drv->
bus = &ccw_bus_type;
2065 spin_unlock_irq(cdev->
ccwlock);
2072 ccw_device_do_unbind_bind(cdev);
2075 io_subchannel_register(cdev);
2083 ccw_device_unregister(cdev);
2085 ccw_device_call_sch_unregister(cdev);
2105 CIO_MSG_EVENT(4,
"cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2108 if (cdev->
private->todo >= todo)