#include <linux/module.h>
#include <linux/string.h>
#include <asm/chpid.h>
static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}
__setup("ccw_timeout_log", ccw_timeout_log_setup);
		       orb, sizeof(*orb), 0);
	       dev_name(&cdev->dev));
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
		       sizeof(struct tcw), 0);
	if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		       sizeof(struct ccw1), 0);
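/*
 * ccw_device_timeout(): timer callback armed via cdev->private->timer; it
 * logs diagnostic state when the "ccw_timeout_log" parameter is set and
 * feeds a timeout event into the device FSM.
 */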
static void ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev = (struct ccw_device *) data;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
	if (timer_pending(&cdev->private->timer)) {
	cdev->private->timer.function = ccw_device_timeout;
	if (!sch->schib.pmcw.ena)
	if (!scsw_is_tm(&sch->schib.scsw)) {
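	/* Copy the sensed control-unit and device type/model into cdev->id. */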
	cdev->id.cu_type = cdev->private->senseid.cu_type;
	cdev->id.cu_model = cdev->private->senseid.cu_model;
	cdev->id.dev_type = cdev->private->senseid.dev_type;
	cdev->id.dev_model = cdev->private->senseid.dev_model;
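	/* Does the stored device ID still match what SenseID reported? */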
	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
	       cdev->id.cu_model == cdev->private->senseid.cu_model &&
	       cdev->id.dev_type == cdev->private->senseid.dev_type &&
	       cdev->id.dev_model == cdev->private->senseid.dev_model;
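/*
 * __recover_lost_chpids(): called when the subchannel's logical path mask
 * changed (sch->lpm != old_lpm); walks the eight channel paths, skipping
 * those not present in the current lpm.
 */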
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
	for (i = 0; i < 8; i++) {
		if (!(sch->lpm & mask))
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	cdev->private->flags.recog_done = 1;
	if (cdev->private->flags.resuming) {
		cdev->private->flags.recog_done = 1;
	cdev->private->flags.recog_done = 1;
	cdev->private->flags.donotify = 1;
	if (cdev->id.cu_type != 0) {
		cdev->private->flags.recog_done = 1;
	if (!cdev->drv->notify) {
	if (cdev->drv->notify(cdev, event))
static void ccw_device_oper_notify(struct ccw_device *cdev)
ccw_device_done(struct ccw_device *cdev, int state)
	cdev->private->flags.donotify = 0;
	cdev->private->flags.donotify = 0;
	       "%04x\n", cdev->private->dev_id.devno,
	cdev->private->flags.donotify = 0;
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	cdev->private->flags.recog_done = 0;
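/*
 * ccw_device_report_path_events(): translate the accumulated per-CHPID masks
 * (path gone, path new, PGID reset) into a path_event notification for the
 * driver, if the device is online and the driver provides a handler.
 */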
static void ccw_device_report_path_events(struct ccw_device *cdev)
	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
		if (mask & cdev->private->path_new_mask & sch->vpm)
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
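/* Clear the accumulated path-event masks once they have been reported. */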
static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}
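/*
 * create_fake_irb(): build a zeroed, synthetic interrupt response block of
 * the requested type; used below when cdev->private->flags.fake_irb is set.
 */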
static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (cdev->private->flags.doverify) {
	if (cdev->private->flags.fake_irb) {
		create_fake_irb(&cdev->private->irb,
				cdev->private->flags.fake_irb);
		cdev->private->flags.fake_irb = 0;
	ccw_device_report_path_events(cdev);
	cdev->private->flags.donotify = 0;
	cdev->private->flags.donotify = 0;
	cdev->private->flags.donotify = 0;
	ccw_device_reset_path_events(cdev);
	cdev->private->flags.donotify = 0;
	cdev->private->flags.donotify = 0;
	if (scsw_actl(&sch->schib.scsw) != 0)
	if (!cdev->private->flags.pgroup) {
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
	cdev->private->flags.doverify = 1;
	if (scsw_actl(&sch->schib.scsw) != 0 ||
	cdev->private->flags.doverify = 1;
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
	ccw_device_online_verify(cdev, dev_event);
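/*
 * ccw_device_irq(): interrupt handler in the device FSM. It distinguishes
 * command-mode from transport-mode I/O (scsw_is_tm) and solicited from
 * unsolicited status, and triggers path verification when doverify is set.
 */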
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
	is_cmd = !scsw_is_tm(&irb->scsw);
	if (!scsw_is_solicited(&irb->scsw)) {
		goto call_handler_unsol;
	if (cdev->private->flags.doverify)
		ccw_device_online_verify(cdev, 0);
	if (is_cmd && cdev->private->flags.dosense) {
	ccw_device_online_verify(cdev, 0);
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
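/*
 * ccw_device_w4sense(): handle interrupts that arrive while the device is
 * waiting for basic sense data (cdev->private->flags.dosense is set).
 */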
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
	if (scsw_stctl(&irb->scsw) ==
	if (scsw_cc(&irb->scsw) == 1)
		      "interrupt during w4sense...\n",
	if (scsw_fctl(&irb->scsw) &
		cdev->private->flags.dosense = 0;
	if (cdev->private->flags.dosense) {
	ccw_device_online_verify(cdev, 0);
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
	ccw_device_online_verify(cdev, 0);
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
	ccw_device_online_verify(cdev, 0);
	ccw_device_online_verify(cdev, 0);
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
	cdev->private->flags.doverify = 1;
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
	ccw_device_start_id(cdev, 0);
static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
	dev_fsm_event(cdev, dev_event);
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
	dev_fsm_event(cdev, dev_event);
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)