Linux Kernel 3.7.1
css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann ([email protected])
 *            Cornelia Huck ([email protected])
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
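
Example: for_each_subchannel() visits every possible subchannel ID, scanning
subchannel numbers within each subchannel set up to max_ssid; the callback
returns 0 to continue and non-zero to stop the scan and propagate that value.
A minimal sketch of a caller, assuming only the interfaces in this file (the
names count_valid_sch and count_all_valid are hypothetical):

static int count_valid_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        int *count = data;

        /* Count IDs that respond to stsch and look usable. */
        if (!stsch_err(schid, &schib) && css_sch_is_valid(&schib))
                (*count)++;
        return 0;       /* 0 means: keep scanning */
}

static int count_all_valid(void)
{
        int count = 0;

        for_each_subchannel(count_valid_sch, &count);
        return count;
}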

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                                                 void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
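
Example: the staged iterator first visits registered subchannels via the
driver core, crossing each one off the idset, and only then walks the
remaining IDs, so each subchannel is seen exactly once. A hedged sketch of a
caller (log_known and probe_unknown are made-up names; a non-zero return from
either callback aborts the iteration):

static int log_known(struct subchannel *sch, void *data)
{
        CIO_MSG_EVENT(4, "known: 0.%x.%04x\n",
                      sch->schid.ssid, sch->schid.sch_no);
        return 0;
}

static int probe_unknown(struct subchannel_id schid, void *data)
{
        return css_probe_device(schid); /* may register a new subchannel */
}

static void scan_all(void)
{
        for_each_subchannel_staged(log_known, probe_unknown, NULL);
}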
137 
138 static void css_sch_todo(struct work_struct *work);
139 
140 static struct subchannel *
141 css_alloc_subchannel(struct subchannel_id schid)
142 {
143  struct subchannel *sch;
144  int ret;
145 
146  sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
147  if (sch == NULL)
148  return ERR_PTR(-ENOMEM);
149  ret = cio_validate_subchannel (sch, schid);
150  if (ret < 0) {
151  kfree(sch);
152  return ERR_PTR(ret);
153  }
154  INIT_WORK(&sch->todo_work, css_sch_todo);
155  return sch;
156 }
157 
158 static void
159 css_subchannel_release(struct device *dev)
160 {
161  struct subchannel *sch;
162 
163  sch = to_subchannel(dev);
164  if (!cio_is_console(sch->schid)) {
165  /* Reset intparm to zeroes. */
166  sch->config.intparm = 0;
167  cio_commit_config(sch);
168  kfree(sch->lock);
169  kfree(sch);
170  }
171 }
172 
173 static int css_sch_device_register(struct subchannel *sch)
174 {
175  int ret;
176 
177  mutex_lock(&sch->reg_mutex);
178  dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
179  sch->schid.sch_no);
180  ret = device_register(&sch->dev);
181  mutex_unlock(&sch->reg_mutex);
182  return ret;
183 }
184 
190 {
191  mutex_lock(&sch->reg_mutex);
192  if (device_is_registered(&sch->dev))
193  device_unregister(&sch->dev);
194  mutex_unlock(&sch->reg_mutex);
195 }
197 
198 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
199 {
200  int i;
201  int mask;
202 
203  memset(ssd, 0, sizeof(struct chsc_ssd_info));
204  ssd->path_mask = pmcw->pim;
205  for (i = 0; i < 8; i++) {
206  mask = 0x80 >> i;
207  if (pmcw->pim & mask) {
208  chp_id_init(&ssd->chpid[i]);
209  ssd->chpid[i].id = pmcw->chpid[i];
210  }
211  }
212 }
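
Example: bit i of the path-installed mask (pim), counting from the leftmost
bit 0x80, says whether chpid[i] names an installed channel path. A worked
sketch with a made-up mask value (pim_example is hypothetical):

static void pim_example(struct subchannel *sch)
{
        struct chsc_ssd_info ssd;

        /* Suppose sch->schib.pmcw.pim == 0xa0, i.e. bits 0 and 2 set. */
        ssd_from_pmcw(&ssd, &sch->schib.pmcw);
        /* Now ssd.path_mask == 0xa0 and only ssd.chpid[0] and ssd.chpid[2]
         * carry valid channel-path IDs; all other slots stay zeroed. */
}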

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}
242 
243 static ssize_t type_show(struct device *dev, struct device_attribute *attr,
244  char *buf)
245 {
246  struct subchannel *sch = to_subchannel(dev);
247 
248  return sprintf(buf, "%01x\n", sch->st);
249 }
250 
251 static DEVICE_ATTR(type, 0444, type_show, NULL);
252 
253 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
254  char *buf)
255 {
256  struct subchannel *sch = to_subchannel(dev);
257 
258  return sprintf(buf, "css:t%01X\n", sch->st);
259 }
260 
261 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
262 
263 static struct attribute *subch_attrs[] = {
264  &dev_attr_type.attr,
265  &dev_attr_modalias.attr,
266  NULL,
267 };
268 
269 static struct attribute_group subch_attr_group = {
270  .attrs = subch_attrs,
271 };
272 
273 static const struct attribute_group *default_subch_attr_groups[] = {
274  &subch_attr_group,
275  NULL,
276 };
277 
static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them, since they will be
         * unregistered before they can be used anyway. So we delay the add
         * uevent until device recognition has succeeded.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        return PTR_ERR(sch);
        }
        ret = css_register_subchannel(sch);
        if (ret) {
                if (!cio_is_console(schid))
                        put_device(&sch->dev);
        }
        return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib)) {
                /* Subchannel is not provided. */
                return -ENXIO;
        }
        if (!css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
                      schid.sch_no);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

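Example: css_sched_sch_todo() must be called with the subchannel lock held;
it takes a device reference that css_sch_todo() drops after the work has run,
and a pending todo is never downgraded (sch->todo >= todo). A minimal sketch
of a caller (request_eval is a hypothetical name):

static void request_eval(struct subchannel *sch)
{
        spin_lock_irq(sch->lock);
        css_sched_sch_todo(sch, SCH_TODO_EVAL);
        spin_unlock_irq(sch->lock);
}
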
static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

static void css_schedule_eval_all_unreg(void)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

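Example: the cm_enable attribute shows up as /sys/devices/css0/cm_enable and
accepts "0" or "1" to stop or start channel-path measurements via chsc_secm().
A hedged userspace sketch (path and error handling kept minimal; enable_cm is
a made-up name):

#include <fcntl.h>
#include <unistd.h>

int enable_cm(void)
{
        int fd, rc;

        fd = open("/sys/devices/css0/cm_enable", O_WRONLY);
        if (fd < 0)
                return -1;
        rc = write(fd, "1", 1); /* ends up in css_cm_enable_store() */
        close(fd);
        return rc == 1 ? 0 : -1;
}
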
static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 0);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 1);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel instances are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret) {
                        put_device(&css->pseudo_subchannel->dev);
                        goto out_file;
                }
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;
        int i;

        for (i = 0; i <= __MAX_CSSID; i++) {
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device, &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
        struct channel_path *chp;
        struct chp_id chpid;

        chsc_enable_facility(CHSC_SDA_OC_MSS);
        chp_id_for_each(&chpid) {
                chp = chpid_to_chp(chpid);
                if (!chp)
                        continue;
                chsc_determine_base_channel_path_desc(chpid, &chp->desc);
        }
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
        .open = nonseekable_open,
        .write = cio_settle_write,
        .llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_fops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
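
Example: writing anything to /proc/cio_settle blocks until pending channel
report words are processed and subchannel evaluation has quiesced, which is
how userspace tools wait for a stable device list. A hedged userspace sketch
(cio_settle_sync is a made-up name):

#include <fcntl.h>
#include <unistd.h>

int cio_settle_sync(void)
{
        int fd, rc;

        fd = open("/proc/cio_settle", O_WRONLY);
        if (fd < 0)
                return -1;
        rc = write(fd, "1", 1); /* blocks in css_complete_work() */
        close(fd);
        return rc == 1 ? 0 : -1;
}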

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        css_update_ssd_info(sch);
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

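Example: css_driver_register() only wires the embedded device_driver to
css_bus_type; matching is done by css_bus_match() against the driver's
subchannel_type table. A hedged skeleton of a subchannel driver (all
example_* names are hypothetical; the match_flags value mirrors the one used
by the I/O subchannel driver):

static struct css_device_id example_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};

static int example_probe(struct subchannel *sch)
{
        dev_info(&sch->dev, "example driver bound\n");
        return 0;
}

static struct css_driver example_driver = {
        .drv = {
                .name  = "example_css_drv",
                .owner = THIS_MODULE,
        },
        .subchannel_type = example_ids,
        .probe           = example_probe,
};

static int __init example_init(void)
{
        return css_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        css_driver_unregister(&example_driver);
}
module_exit(example_exit);
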
MODULE_LICENSE("GPL");