Linux Kernel  3.7.1
sclp.c
1 /*
2  * core function to access sclp interface
3  *
4  * Copyright IBM Corp. 1999, 2009
5  *
6  * Author(s): Martin Peschke <[email protected]>
7  * Martin Schwidefsky <[email protected]>
8  */
9 
10 #include <linux/kernel_stat.h>
11 #include <linux/module.h>
12 #include <linux/err.h>
13 #include <linux/spinlock.h>
14 #include <linux/interrupt.h>
15 #include <linux/timer.h>
16 #include <linux/reboot.h>
17 #include <linux/jiffies.h>
18 #include <linux/init.h>
19 #include <linux/suspend.h>
20 #include <linux/completion.h>
21 #include <linux/platform_device.h>
22 #include <asm/types.h>
23 #include <asm/irq.h>
24 
25 #include "sclp.h"
26 
27 #define SCLP_HEADER "sclp: "
28 
29 /* Lock to protect internal data consistency. */
30 static DEFINE_SPINLOCK(sclp_lock);
31 
32 /* Mask of events that we can send to the sclp interface. */
33 static sccb_mask_t sclp_receive_mask;
34 
35 /* Mask of events that we can receive from the sclp interface. */
36 static sccb_mask_t sclp_send_mask;
37 
38 /* List of registered event listeners and senders. */
39 static struct list_head sclp_reg_list;
40 
41 /* List of queued requests. */
42 static struct list_head sclp_req_queue;
43 
44 /* Data for read and init requests. */
45 static struct sclp_req sclp_read_req;
46 static struct sclp_req sclp_init_req;
47 static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
48 static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
49 
50 /* Suspend request */
51 static DECLARE_COMPLETION(sclp_request_queue_flushed);
52 
53 static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
54 {
55  complete(&sclp_request_queue_flushed);
56 }
57 
58 static struct sclp_req sclp_suspend_req;
59 
60 /* Timer for request retries. */
61 static struct timer_list sclp_request_timer;
62 
63 /* Internal state: is the driver initialized? */
64 static volatile enum sclp_init_state_t {
65  sclp_init_state_uninitialized,
66  sclp_init_state_initializing,
67  sclp_init_state_initialized
68 } sclp_init_state = sclp_init_state_uninitialized;
69 
70 /* Internal state: is a request active at the sclp? */
71 static volatile enum sclp_running_state_t {
72  sclp_running_state_idle,
73  sclp_running_state_running,
74  sclp_running_state_reset_pending
75 } sclp_running_state = sclp_running_state_idle;
76 
77 /* Internal state: is a read request pending? */
78 static volatile enum sclp_reading_state_t {
79  sclp_reading_state_idle,
80  sclp_reading_state_reading
81 } sclp_reading_state = sclp_reading_state_idle;
82 
83 /* Internal state: is the driver currently serving requests? */
84 static volatile enum sclp_activation_state_t {
85  sclp_activation_state_active,
86  sclp_activation_state_activating,
87  sclp_activation_state_inactive,
88  sclp_activation_state_deactivating
89 } sclp_activation_state = sclp_activation_state_active;
90 
91 /* Internal state: is an init mask request pending? */
92 static volatile enum sclp_mask_state_t {
93  sclp_mask_state_idle,
94  sclp_mask_state_initializing
95 } sclp_mask_state = sclp_mask_state_idle;
96 
97 /* Internal state: is the driver suspended? */
98 static enum sclp_suspend_state_t {
99  sclp_suspend_state_running,
100  sclp_suspend_state_suspended
101 } sclp_suspend_state = sclp_suspend_state_running;
102 
103 /* Maximum retry counts */
104 #define SCLP_INIT_RETRY 3
105 #define SCLP_MASK_RETRY 3
106 
107 /* Timeout intervals in seconds. */
108 #define SCLP_BUSY_INTERVAL 10
109 #define SCLP_RETRY_INTERVAL 30
110 
111 static void sclp_process_queue(void);
112 static void __sclp_make_read_req(void);
113 static int sclp_init_mask(int calculate);
114 static int sclp_init(void);
115 
116 /* Perform service call. Return 0 on success, non-zero otherwise. */
117 int
118 sclp_service_call(sclp_cmdw_t command, void *sccb)
119 {
120  int cc;
121 
122  asm volatile(
123  " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
124  " ipm %0\n"
125  " srl %0,28"
126  : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
127  : "cc", "memory");
128  if (cc == 3)
129  return -EIO;
130  if (cc == 2)
131  return -EBUSY;
132  return 0;
133 }
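/*
 * Note on the inline assembly above: ".insn rre,0xb2200000,%1,%2"
 * hand-assembles the s390 SERVICE CALL (SERVC) instruction, opcode 0xb220,
 * with the SCLP command word and the real address of the SCCB as operands;
 * "ipm"/"srl" then extract the condition code.  Condition code 3 (SCLP not
 * operational) is mapped to -EIO and condition code 2 (SCLP busy) to -EBUSY;
 * any other condition code is treated as success, and completion of the
 * request is signalled later by a service-signal external interrupt.
 */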
134 
135 
136 static void
137 __sclp_queue_read_req(void)
138 {
139  if (sclp_reading_state == sclp_reading_state_idle) {
140  sclp_reading_state = sclp_reading_state_reading;
141  __sclp_make_read_req();
142  /* Add request to head of queue */
143  list_add(&sclp_read_req.list, &sclp_req_queue);
144  }
145 }
146 
147 /* Set up request retry timer. Called while sclp_lock is locked. */
148 static inline void
149 __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
150  unsigned long data)
151 {
152  del_timer(&sclp_request_timer);
153  sclp_request_timer.function = function;
154  sclp_request_timer.data = data;
155  sclp_request_timer.expires = jiffies + time;
156  add_timer(&sclp_request_timer);
157 }
158 
159 /* Request timeout handler. Restart the request queue. If DATA is non-zero,
160  * force restart of running request. */
161 static void
162 sclp_request_timeout(unsigned long data)
163 {
164  unsigned long flags;
165 
166  spin_lock_irqsave(&sclp_lock, flags);
167  if (data) {
168  if (sclp_running_state == sclp_running_state_running) {
169  /* Break running state and queue NOP read event request
170  * to get a defined interface state. */
171  __sclp_queue_read_req();
172  sclp_running_state = sclp_running_state_idle;
173  }
174  } else {
175  __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
176  sclp_request_timeout, 0);
177  }
178  spin_unlock_irqrestore(&sclp_lock, flags);
179  sclp_process_queue();
180 }
181 
182 /* Try to start a request. Return zero if the request was successfully
183  * started or if it will be started at a later time. Return non-zero otherwise.
184  * Called while sclp_lock is locked. */
185 static int
186 __sclp_start_request(struct sclp_req *req)
187 {
188  int rc;
189 
190  if (sclp_running_state != sclp_running_state_idle)
191  return 0;
192  del_timer(&sclp_request_timer);
193  rc = sclp_service_call(req->command, req->sccb);
194  req->start_count++;
195 
196  if (rc == 0) {
197  /* Successfully started request */
198  req->status = SCLP_REQ_RUNNING;
199  sclp_running_state = sclp_running_state_running;
200  __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
201  sclp_request_timeout, 1);
202  return 0;
203  } else if (rc == -EBUSY) {
204  /* Try again later */
205  __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
206  sclp_request_timeout, 0);
207  return 0;
208  }
209  /* Request failed */
210  req->status = SCLP_REQ_FAILED;
211  return rc;
212 }
213 
214 /* Try to start queued requests. */
215 static void
216 sclp_process_queue(void)
217 {
218  struct sclp_req *req;
219  int rc;
220  unsigned long flags;
221 
222  spin_lock_irqsave(&sclp_lock, flags);
223  if (sclp_running_state != sclp_running_state_idle) {
224  spin_unlock_irqrestore(&sclp_lock, flags);
225  return;
226  }
227  del_timer(&sclp_request_timer);
228  while (!list_empty(&sclp_req_queue)) {
229  req = list_entry(sclp_req_queue.next, struct sclp_req, list);
230  if (!req->sccb)
231  goto do_post;
232  rc = __sclp_start_request(req);
233  if (rc == 0)
234  break;
235  /* Request failed */
236  if (req->start_count > 1) {
237  /* Cannot abort already submitted request - could still
238  * be active at the SCLP */
239  __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
240  sclp_request_timeout, 0);
241  break;
242  }
243 do_post:
244  /* Post-processing for aborted request */
245  list_del(&req->list);
246  if (req->callback) {
247  spin_unlock_irqrestore(&sclp_lock, flags);
248  req->callback(req, req->callback_data);
249  spin_lock_irqsave(&sclp_lock, flags);
250  }
251  }
252  spin_unlock_irqrestore(&sclp_lock, flags);
253 }
254 
255 static int __sclp_can_add_request(struct sclp_req *req)
256 {
257  if (req == &sclp_suspend_req || req == &sclp_init_req)
258  return 1;
259  if (sclp_suspend_state != sclp_suspend_state_running)
260  return 0;
261  if (sclp_init_state != sclp_init_state_initialized)
262  return 0;
263  if (sclp_activation_state != sclp_activation_state_active)
264  return 0;
265  return 1;
266 }
267 
268 /* Queue a new request. Return zero on success, non-zero otherwise. */
269 int
270 sclp_add_request(struct sclp_req *req)
271 {
272  unsigned long flags;
273  int rc;
274 
275  spin_lock_irqsave(&sclp_lock, flags);
276  if (!__sclp_can_add_request(req)) {
277  spin_unlock_irqrestore(&sclp_lock, flags);
278  return -EIO;
279  }
280  req->status = SCLP_REQ_QUEUED;
281  req->start_count = 0;
282  list_add_tail(&req->list, &sclp_req_queue);
283  rc = 0;
284  /* Start if request is first in list */
285  if (sclp_running_state == sclp_running_state_idle &&
286  req->list.prev == &sclp_req_queue) {
287  if (!req->sccb) {
288  list_del(&req->list);
289  rc = -ENODATA;
290  goto out;
291  }
292  rc = __sclp_start_request(req);
293  if (rc)
294  list_del(&req->list);
295  }
296 out:
297  spin_unlock_irqrestore(&sclp_lock, flags);
298  return rc;
299 }
300 
301 EXPORT_SYMBOL(sclp_add_request);
302 
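/*
 * Minimal usage sketch, not part of the original file; the "example_*" names
 * are illustrative only.  A caller queues a request with sclp_add_request()
 * and polls for completion with sclp_sync_wait(), the same pattern that
 * sclp_init_mask() uses further down.  A real caller would first fill the
 * SCCB as required by the particular command word.
 */
static char example_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static struct sclp_req example_req;

static int example_sync_request(sclp_cmdw_t command)
{
        memset(&example_req, 0, sizeof(example_req));
        example_req.command = command;
        example_req.status = SCLP_REQ_FILLED;
        example_req.sccb = example_sccb;
        if (sclp_add_request(&example_req))
                return -EIO;
        /* Busy-wait; sclp_sync_wait() keeps service-signal interrupts open. */
        while (example_req.status != SCLP_REQ_DONE &&
               example_req.status != SCLP_REQ_FAILED)
                sclp_sync_wait();
        return example_req.status == SCLP_REQ_DONE ? 0 : -EIO;
}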
303 /* Dispatch events found in request buffer to registered listeners. Return 0
304  * if all events were dispatched, non-zero otherwise. */
305 static int
306 sclp_dispatch_evbufs(struct sccb_header *sccb)
307 {
308  unsigned long flags;
309  struct evbuf_header *evbuf;
310  struct list_head *l;
311  struct sclp_register *reg;
312  int offset;
313  int rc;
314 
315  spin_lock_irqsave(&sclp_lock, flags);
316  rc = 0;
317  for (offset = sizeof(struct sccb_header); offset < sccb->length;
318  offset += evbuf->length) {
319  evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
320  /* Check for malformed hardware response */
321  if (evbuf->length == 0)
322  break;
323  /* Search for event handler */
324  reg = NULL;
325  list_for_each(l, &sclp_reg_list) {
326  reg = list_entry(l, struct sclp_register, list);
327  if (reg->receive_mask & (1 << (32 - evbuf->type)))
328  break;
329  else
330  reg = NULL;
331  }
332  if (reg && reg->receiver_fn) {
333  spin_unlock_irqrestore(&sclp_lock, flags);
334  reg->receiver_fn(evbuf);
335  spin_lock_irqsave(&sclp_lock, flags);
336  } else if (reg == NULL)
337  rc = -EOPNOTSUPP;
338  }
339  spin_unlock_irqrestore(&sclp_lock, flags);
340  return rc;
341 }
342 
343 /* Read event data request callback. */
344 static void
345 sclp_read_cb(struct sclp_req *req, void *data)
346 {
347  unsigned long flags;
348  struct sccb_header *sccb;
349 
350  sccb = (struct sccb_header *) req->sccb;
351  if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
352  sccb->response_code == 0x220))
353  sclp_dispatch_evbufs(sccb);
354  spin_lock_irqsave(&sclp_lock, flags);
355  sclp_reading_state = sclp_reading_state_idle;
356  spin_unlock_irqrestore(&sclp_lock, flags);
357 }
358 
359 /* Prepare read event data request. Called while sclp_lock is locked. */
360 static void __sclp_make_read_req(void)
361 {
362  struct sccb_header *sccb;
363 
364  sccb = (struct sccb_header *) sclp_read_sccb;
365  clear_page(sccb);
366  memset(&sclp_read_req, 0, sizeof(struct sclp_req));
367  sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
368  sclp_read_req.status = SCLP_REQ_QUEUED;
369  sclp_read_req.start_count = 0;
370  sclp_read_req.callback = sclp_read_cb;
371  sclp_read_req.sccb = sccb;
372  sccb->length = PAGE_SIZE;
373  sccb->function_code = 0;
374  sccb->control_mask[2] = 0x80;
375 }
376 
377 /* Search request list for request with matching sccb. Return request if found,
378  * NULL otherwise. Called while sclp_lock is locked. */
379 static inline struct sclp_req *
380 __sclp_find_req(u32 sccb)
381 {
382  struct list_head *l;
383  struct sclp_req *req;
384 
385  list_for_each(l, &sclp_req_queue) {
386  req = list_entry(l, struct sclp_req, list);
387  if (sccb == (u32) (addr_t) req->sccb)
388  return req;
389  }
390  return NULL;
391 }
392 
393 /* Handler for external interruption. Perform request post-processing.
394  * Prepare read event data request if necessary. Start processing of next
395  * request on queue. */
396 static void sclp_interrupt_handler(struct ext_code ext_code,
397  unsigned int param32, unsigned long param64)
398 {
399  struct sclp_req *req;
400  u32 finished_sccb;
401  u32 evbuf_pending;
402 
404  spin_lock(&sclp_lock);
405  finished_sccb = param32 & 0xfffffff8;
406  evbuf_pending = param32 & 0x3;
407  if (finished_sccb) {
408  del_timer(&sclp_request_timer);
409  sclp_running_state = sclp_running_state_reset_pending;
410  req = __sclp_find_req(finished_sccb);
411  if (req) {
412  /* Request post-processing */
413  list_del(&req->list);
414  req->status = SCLP_REQ_DONE;
415  if (req->callback) {
416  spin_unlock(&sclp_lock);
417  req->callback(req, req->callback_data);
418  spin_lock(&sclp_lock);
419  }
420  }
421  sclp_running_state = sclp_running_state_idle;
422  }
423  if (evbuf_pending &&
424  sclp_activation_state == sclp_activation_state_active)
425  __sclp_queue_read_req();
426  spin_unlock(&sclp_lock);
427  sclp_process_queue();
428 }
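/*
 * The 32-bit interruption parameter of the service-signal interrupt carries
 * the address of the SCCB whose request has completed; SCCBs are doubleword
 * aligned (the ones used by this driver are page aligned), so masking with
 * 0xfffffff8 recovers that address, while the low-order bits indicate that
 * unsolicited event buffers are pending and should be fetched with a read
 * event data request.
 */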
429 
430 /* Convert interval in jiffies to TOD ticks. */
431 static inline u64
432 sclp_tod_from_jiffies(unsigned long jiffies)
433 {
434  return (u64) (jiffies / HZ) << 32;
435 }
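/*
 * Bit 51 of the TOD clock increments once per microsecond, so one second is
 * 4096000000 TOD units while 1ULL << 32 is roughly 4.29e9 of them.  Shifting
 * whole seconds left by 32 therefore yields a slightly generous TOD interval
 * (about 1.05 real seconds per second), which is precise enough for the
 * coarse timeout check in sclp_sync_wait() below.
 */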
436 
437 /* Wait until a currently running request has finished. Note: while this function
438  * is running, no timers are served on the calling CPU. */
439 void
440 sclp_sync_wait(void)
441 {
442  unsigned long long old_tick;
443  unsigned long flags;
444  unsigned long cr0, cr0_sync;
445  u64 timeout;
446  int irq_context;
447 
448  /* We'll be disabling timer interrupts, so we need a custom timeout
449  * mechanism */
450  timeout = 0;
451  if (timer_pending(&sclp_request_timer)) {
452  /* Get timeout TOD value */
453  timeout = get_clock() +
454  sclp_tod_from_jiffies(sclp_request_timer.expires -
455  jiffies);
456  }
457  local_irq_save(flags);
458  /* Prevent bottom half from executing once we force interrupts open */
459  irq_context = in_interrupt();
460  if (!irq_context)
461  local_bh_disable();
462  /* Enable service-signal interruption, disable timer interrupts */
463  old_tick = local_tick_disable();
465  __ctl_store(cr0, 0, 0);
466  cr0_sync = cr0;
467  cr0_sync &= 0xffff00a0;
468  cr0_sync |= 0x00000200;
469  __ctl_load(cr0_sync, 0, 0);
470  __arch_local_irq_stosm(0x01);
471  /* Loop until driver state indicates finished request */
472  while (sclp_running_state != sclp_running_state_idle) {
473  /* Check for expired request timer */
474  if (timer_pending(&sclp_request_timer) &&
475  get_clock() > timeout &&
476  del_timer(&sclp_request_timer))
477  sclp_request_timer.function(sclp_request_timer.data);
478  cpu_relax();
479  }
480  local_irq_disable();
481  __ctl_load(cr0, 0, 0);
482  if (!irq_context)
483  _local_bh_enable();
484  local_tick_enable(old_tick);
485  local_irq_restore(flags);
486 }
487 EXPORT_SYMBOL(sclp_sync_wait);
488 
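/*
 * On the control-register manipulation in sclp_sync_wait(): cr0_sync keeps
 * only a subset of the external-interruption subclass mask bits and sets
 * 0x00000200, the service-signal subclass mask (bit 22 of the low-order word
 * of CR0).  Together with local_tick_disable() this lets the service-signal
 * interrupt for the completing request come in while timer interrupts stay
 * off, so the polling loop makes progress even when the function is entered
 * with other interrupt sources disabled.
 */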
489 /* Dispatch changes in send and receive mask to registered listeners. */
490 static void
491 sclp_dispatch_state_change(void)
492 {
493  struct list_head *l;
494  struct sclp_register *reg;
495  unsigned long flags;
496  sccb_mask_t receive_mask;
497  sccb_mask_t send_mask;
498 
499  do {
500  spin_lock_irqsave(&sclp_lock, flags);
501  reg = NULL;
502  list_for_each(l, &sclp_reg_list) {
503  reg = list_entry(l, struct sclp_register, list);
504  receive_mask = reg->send_mask & sclp_receive_mask;
505  send_mask = reg->receive_mask & sclp_send_mask;
506  if (reg->sclp_receive_mask != receive_mask ||
507  reg->sclp_send_mask != send_mask) {
508  reg->sclp_receive_mask = receive_mask;
509  reg->sclp_send_mask = send_mask;
510  break;
511  } else
512  reg = NULL;
513  }
514  spin_unlock_irqrestore(&sclp_lock, flags);
515  if (reg && reg->state_change_fn)
516  reg->state_change_fn(reg);
517  } while (reg);
518 }
519 
520 struct sclp_statechangebuf {
521  struct evbuf_header header;
522  u8 validity_sclp_active_facility_mask : 1;
523  u8 validity_sclp_receive_mask : 1;
524  u8 validity_sclp_send_mask : 1;
525  u8 validity_read_data_function_mask : 1;
526  u16 _zeros : 12;
527  u16 mask_length;
528  u64 sclp_active_facility_mask;
529  sccb_mask_t sclp_receive_mask;
530  sccb_mask_t sclp_send_mask;
531  u32 read_data_function_mask;
532 } __attribute__((packed));
535 /* State change event callback. Inform listeners of changes. */
536 static void
537 sclp_state_change_cb(struct evbuf_header *evbuf)
538 {
539  unsigned long flags;
540  struct sclp_statechangebuf *scbuf;
541 
542  scbuf = (struct sclp_statechangebuf *) evbuf;
543  if (scbuf->mask_length != sizeof(sccb_mask_t))
544  return;
545  spin_lock_irqsave(&sclp_lock, flags);
546  if (scbuf->validity_sclp_receive_mask)
547  sclp_receive_mask = scbuf->sclp_receive_mask;
548  if (scbuf->validity_sclp_send_mask)
549  sclp_send_mask = scbuf->sclp_send_mask;
550  spin_unlock_irqrestore(&sclp_lock, flags);
551  if (scbuf->validity_sclp_active_facility_mask)
552  sclp_facilities = scbuf->sclp_active_facility_mask;
553  sclp_dispatch_state_change();
554 }
555 
556 static struct sclp_register sclp_state_change_event = {
557  .receive_mask = EVTYP_STATECHANGE_MASK,
558  .receiver_fn = sclp_state_change_cb
559 };
560 
561 /* Calculate receive and send mask of currently registered listeners.
562  * Called while sclp_lock is locked. */
563 static inline void
564 __sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
565 {
566  struct list_head *l;
567  struct sclp_register *t;
568 
569  *receive_mask = 0;
570  *send_mask = 0;
571  list_for_each(l, &sclp_reg_list) {
572  t = list_entry(l, struct sclp_register, list);
573  *receive_mask |= t->receive_mask;
574  *send_mask |= t->send_mask;
575  }
576 }
577 
578 /* Register event listener. Return 0 on success, non-zero otherwise. */
579 int
580 sclp_register(struct sclp_register *reg)
581 {
582  unsigned long flags;
583  sccb_mask_t receive_mask;
584  sccb_mask_t send_mask;
585  int rc;
586 
587  rc = sclp_init();
588  if (rc)
589  return rc;
590  spin_lock_irqsave(&sclp_lock, flags);
591  /* Check event mask for collisions */
592  __sclp_get_mask(&receive_mask, &send_mask);
593  if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
594  spin_unlock_irqrestore(&sclp_lock, flags);
595  return -EBUSY;
596  }
597  /* Trigger initial state change callback */
598  reg->sclp_receive_mask = 0;
599  reg->sclp_send_mask = 0;
600  reg->pm_event_posted = 0;
601  list_add(&reg->list, &sclp_reg_list);
602  spin_unlock_irqrestore(&sclp_lock, flags);
603  rc = sclp_init_mask(1);
604  if (rc) {
605  spin_lock_irqsave(&sclp_lock, flags);
606  list_del(&reg->list);
607  spin_unlock_irqrestore(&sclp_lock, flags);
608  }
609  return rc;
610 }
611 
612 EXPORT_SYMBOL(sclp_register);
613 
614 /* Unregister event listener. */
615 void
616 sclp_unregister(struct sclp_register *reg)
617 {
618  unsigned long flags;
619 
620  spin_lock_irqsave(&sclp_lock, flags);
621  list_del(&reg->list);
622  spin_unlock_irqrestore(&sclp_lock, flags);
623  sclp_init_mask(1);
624 }
625 
626 EXPORT_SYMBOL(sclp_unregister);
627 
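/*
 * Minimal client sketch, not part of the original file; the "example_*"
 * names are illustrative.  An event driver fills in a struct sclp_register
 * with the event-type masks and callbacks it is interested in and calls
 * sclp_register().  The masks must not overlap with those of an already
 * registered listener, otherwise sclp_register() returns -EBUSY;
 * EVTYP_STATECHANGE_MASK below merely stands in for one of the other
 * EVTYP_*_MASK constants from sclp.h.
 */
static void example_receiver_fn(struct evbuf_header *evbuf)
{
        /* Consume the event buffer and set its "processed" flag (0x80). */
}

static struct sclp_register example_listener = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = example_receiver_fn,
};

/*
 * rc = sclp_register(&example_listener);
 * ...
 * sclp_unregister(&example_listener);
 */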
628 /* Remove event buffers which are marked processed. Return the number of
629  * remaining event buffers. */
630 int
631 sclp_remove_processed(struct sccb_header *sccb)
632 {
633  struct evbuf_header *evbuf;
634  int unprocessed;
635  u16 remaining;
636 
637  evbuf = (struct evbuf_header *) (sccb + 1);
638  unprocessed = 0;
639  remaining = sccb->length - sizeof(struct sccb_header);
640  while (remaining > 0) {
641  remaining -= evbuf->length;
642  if (evbuf->flags & 0x80) {
643  sccb->length -= evbuf->length;
644  memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
645  remaining);
646  } else {
647  unprocessed++;
648  evbuf = (struct evbuf_header *)
649  ((addr_t) evbuf + evbuf->length);
650  }
651  }
652  return unprocessed;
653 }
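/*
 * sclp_remove_processed() compacts the SCCB in place: each event buffer whose
 * "processed" flag (0x80) is set is overwritten by the buffers following it
 * and its length is subtracted from sccb->length, so the caller can reuse or
 * resubmit the SCCB with only the still unprocessed buffers.  The return
 * value is the number of buffers that remain unprocessed.
 */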
654 
655 EXPORT_SYMBOL(sclp_remove_processed);
656 
657 /* Prepare init mask request. Called while sclp_lock is locked. */
658 static inline void
659 __sclp_make_init_req(u32 receive_mask, u32 send_mask)
660 {
661  struct init_sccb *sccb;
662 
663  sccb = (struct init_sccb *) sclp_init_sccb;
664  clear_page(sccb);
665  memset(&sclp_init_req, 0, sizeof(struct sclp_req));
666  sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
667  sclp_init_req.status = SCLP_REQ_FILLED;
668  sclp_init_req.start_count = 0;
669  sclp_init_req.callback = NULL;
670  sclp_init_req.callback_data = NULL;
671  sclp_init_req.sccb = sccb;
672  sccb->header.length = sizeof(struct init_sccb);
673  sccb->mask_length = sizeof(sccb_mask_t);
674  sccb->receive_mask = receive_mask;
675  sccb->send_mask = send_mask;
676  sccb->sclp_receive_mask = 0;
677  sccb->sclp_send_mask = 0;
678 }
679 
680 /* Start init mask request. If calculate is non-zero, calculate the mask as
681  * requested by registered listeners. Use zero mask otherwise. Return 0 on
682  * success, non-zero otherwise. */
683 static int
684 sclp_init_mask(int calculate)
685 {
686  unsigned long flags;
687  struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
688  sccb_mask_t receive_mask;
689  sccb_mask_t send_mask;
690  int retry;
691  int rc;
692  unsigned long wait;
693 
694  spin_lock_irqsave(&sclp_lock, flags);
695  /* Check if interface is in appropriate state */
696  if (sclp_mask_state != sclp_mask_state_idle) {
697  spin_unlock_irqrestore(&sclp_lock, flags);
698  return -EBUSY;
699  }
700  if (sclp_activation_state == sclp_activation_state_inactive) {
701  spin_unlock_irqrestore(&sclp_lock, flags);
702  return -EINVAL;
703  }
704  sclp_mask_state = sclp_mask_state_initializing;
705  /* Determine mask */
706  if (calculate)
707  __sclp_get_mask(&receive_mask, &send_mask);
708  else {
709  receive_mask = 0;
710  send_mask = 0;
711  }
712  rc = -EIO;
713  for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
714  /* Prepare request */
715  __sclp_make_init_req(receive_mask, send_mask);
716  spin_unlock_irqrestore(&sclp_lock, flags);
717  if (sclp_add_request(&sclp_init_req)) {
718  /* Try again later */
719  wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
720  while (time_before(jiffies, wait))
721  sclp_sync_wait();
722  spin_lock_irqsave(&sclp_lock, flags);
723  continue;
724  }
725  while (sclp_init_req.status != SCLP_REQ_DONE &&
726  sclp_init_req.status != SCLP_REQ_FAILED)
727  sclp_sync_wait();
728  spin_lock_irqsave(&sclp_lock, flags);
729  if (sclp_init_req.status == SCLP_REQ_DONE &&
730  sccb->header.response_code == 0x20) {
731  /* Successful request */
732  if (calculate) {
733  sclp_receive_mask = sccb->sclp_receive_mask;
734  sclp_send_mask = sccb->sclp_send_mask;
735  } else {
736  sclp_receive_mask = 0;
737  sclp_send_mask = 0;
738  }
739  spin_unlock_irqrestore(&sclp_lock, flags);
740  sclp_dispatch_state_change();
741  spin_lock_irqsave(&sclp_lock, flags);
742  rc = 0;
743  break;
744  }
745  }
746  sclp_mask_state = sclp_mask_state_idle;
747  spin_unlock_irqrestore(&sclp_lock, flags);
748  return rc;
749 }
750 
751 /* Deactivate SCLP interface. On success, new requests will be rejected,
752  * events will no longer be dispatched. Return 0 on success, non-zero
753  * otherwise. */
754 int
755 sclp_deactivate(void)
756 {
757  unsigned long flags;
758  int rc;
759 
760  spin_lock_irqsave(&sclp_lock, flags);
761  /* Deactivate can only be called when active */
762  if (sclp_activation_state != sclp_activation_state_active) {
763  spin_unlock_irqrestore(&sclp_lock, flags);
764  return -EINVAL;
765  }
766  sclp_activation_state = sclp_activation_state_deactivating;
767  spin_unlock_irqrestore(&sclp_lock, flags);
768  rc = sclp_init_mask(0);
769  spin_lock_irqsave(&sclp_lock, flags);
770  if (rc == 0)
771  sclp_activation_state = sclp_activation_state_inactive;
772  else
773  sclp_activation_state = sclp_activation_state_active;
774  spin_unlock_irqrestore(&sclp_lock, flags);
775  return rc;
776 }
777 
778 EXPORT_SYMBOL(sclp_deactivate);
779 
780 /* Reactivate SCLP interface after sclp_deactivate. On success, new
781  * requests will be accepted, events will be dispatched again. Return 0 on
782  * success, non-zero otherwise. */
783 int
784 sclp_reactivate(void)
785 {
786  unsigned long flags;
787  int rc;
788 
789  spin_lock_irqsave(&sclp_lock, flags);
790  /* Reactivate can only be called when inactive */
791  if (sclp_activation_state != sclp_activation_state_inactive) {
792  spin_unlock_irqrestore(&sclp_lock, flags);
793  return -EINVAL;
794  }
795  sclp_activation_state = sclp_activation_state_activating;
796  spin_unlock_irqrestore(&sclp_lock, flags);
797  rc = sclp_init_mask(1);
798  spin_lock_irqsave(&sclp_lock, flags);
799  if (rc == 0)
800  sclp_activation_state = sclp_activation_state_active;
801  else
802  sclp_activation_state = sclp_activation_state_inactive;
803  spin_unlock_irqrestore(&sclp_lock, flags);
804  return rc;
805 }
806 
807 EXPORT_SYMBOL(sclp_reactivate);
808 
809 /* Handler for external interruption used during initialization. Modify
810  * request state to done. */
811 static void sclp_check_handler(struct ext_code ext_code,
812  unsigned int param32, unsigned long param64)
813 {
814  u32 finished_sccb;
815 
817  finished_sccb = param32 & 0xfffffff8;
818  /* Is this the interrupt we are waiting for? */
819  if (finished_sccb == 0)
820  return;
821  if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
822  panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
823  finished_sccb);
824  spin_lock(&sclp_lock);
825  if (sclp_running_state == sclp_running_state_running) {
826  sclp_init_req.status = SCLP_REQ_DONE;
827  sclp_running_state = sclp_running_state_idle;
828  }
829  spin_unlock(&sclp_lock);
830 }
831 
832 /* Initial init mask request timed out. Modify request state to failed. */
833 static void
834 sclp_check_timeout(unsigned long data)
835 {
836  unsigned long flags;
837 
838  spin_lock_irqsave(&sclp_lock, flags);
839  if (sclp_running_state == sclp_running_state_running) {
840  sclp_init_req.status = SCLP_REQ_FAILED;
841  sclp_running_state = sclp_running_state_idle;
842  }
843  spin_unlock_irqrestore(&sclp_lock, flags);
844 }
845 
846 /* Perform a check of the SCLP interface. Return zero if the interface is
847  * available and there are no pending requests from a previous instance.
848  * Return non-zero otherwise. */
849 static int
850 sclp_check_interface(void)
851 {
852  struct init_sccb *sccb;
853  unsigned long flags;
854  int retry;
855  int rc;
856 
857  spin_lock_irqsave(&sclp_lock, flags);
858  /* Prepare init mask command */
859  rc = register_external_interrupt(0x2401, sclp_check_handler);
860  if (rc) {
861  spin_unlock_irqrestore(&sclp_lock, flags);
862  return rc;
863  }
864  for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
865  __sclp_make_init_req(0, 0);
866  sccb = (struct init_sccb *) sclp_init_req.sccb;
867  rc = sclp_service_call(sclp_init_req.command, sccb);
868  if (rc == -EIO)
869  break;
870  sclp_init_req.status = SCLP_REQ_RUNNING;
871  sclp_running_state = sclp_running_state_running;
872  __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
873  sclp_check_timeout, 0);
874  spin_unlock_irqrestore(&sclp_lock, flags);
875  /* Enable service-signal interruption - needs to happen
876  * with IRQs enabled. */
878  /* Wait for signal from interrupt or timeout */
879  sclp_sync_wait();
880  /* Disable service-signal interruption - needs to happen
881  * with IRQs enabled. */
883  spin_lock_irqsave(&sclp_lock, flags);
884  del_timer(&sclp_request_timer);
885  if (sclp_init_req.status == SCLP_REQ_DONE &&
886  sccb->header.response_code == 0x20) {
887  rc = 0;
888  break;
889  } else
890  rc = -EBUSY;
891  }
892  unregister_external_interrupt(0x2401, sclp_check_handler);
893  spin_unlock_irqrestore(&sclp_lock, flags);
894  return rc;
895 }
896 
897 /* Reboot event handler. Reset send and receive mask to prevent pending SCLP
898  * events from interfering with rebooted system. */
899 static int
900 sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
901 {
902  sclp_deactivate();
903  return NOTIFY_DONE;
904 }
905 
906 static struct notifier_block sclp_reboot_notifier = {
907  .notifier_call = sclp_reboot_event
908 };
909 
910 /*
911  * Suspend/resume SCLP notifier implementation
912  */
913 
914 static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
915 {
916  struct sclp_register *reg;
917  unsigned long flags;
918 
919  if (!rollback) {
920  spin_lock_irqsave(&sclp_lock, flags);
921  list_for_each_entry(reg, &sclp_reg_list, list)
922  reg->pm_event_posted = 0;
923  spin_unlock_irqrestore(&sclp_lock, flags);
924  }
925  do {
926  spin_lock_irqsave(&sclp_lock, flags);
927  list_for_each_entry(reg, &sclp_reg_list, list) {
928  if (rollback && reg->pm_event_posted)
929  goto found;
930  if (!rollback && !reg->pm_event_posted)
931  goto found;
932  }
933  spin_unlock_irqrestore(&sclp_lock, flags);
934  return;
935 found:
936  spin_unlock_irqrestore(&sclp_lock, flags);
937  if (reg->pm_event_fn)
938  reg->pm_event_fn(reg, sclp_pm_event);
939  reg->pm_event_posted = rollback ? 0 : 1;
940  } while (1);
941 }
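/*
 * sclp_pm_event() drops sclp_lock before invoking a listener's pm_event_fn
 * and then rescans the list from the beginning; the per-listener
 * pm_event_posted flag records who has already been notified, so no listener
 * is called twice and a rollback (e.g. a thaw after a failed freeze) only
 * reaches listeners that saw the original event.
 */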
942 
943 /*
944  * Suspend/resume callbacks for platform device
945  */
946 
947 static int sclp_freeze(struct device *dev)
948 {
949  unsigned long flags;
950  int rc;
951 
952  sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
953 
954  spin_lock_irqsave(&sclp_lock, flags);
955  sclp_suspend_state = sclp_suspend_state_suspended;
956  spin_unlock_irqrestore(&sclp_lock, flags);
957 
958  /* Init suspend data */
959  memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
960  sclp_suspend_req.callback = sclp_suspend_req_cb;
961  sclp_suspend_req.status = SCLP_REQ_FILLED;
962  init_completion(&sclp_request_queue_flushed);
963 
964  rc = sclp_add_request(&sclp_suspend_req);
965  if (rc == 0)
966  wait_for_completion(&sclp_request_queue_flushed);
967  else if (rc != -ENODATA)
968  goto fail_thaw;
969 
970  rc = sclp_deactivate();
971  if (rc)
972  goto fail_thaw;
973  return 0;
974 
975 fail_thaw:
976  spin_lock_irqsave(&sclp_lock, flags);
977  sclp_suspend_state = sclp_suspend_state_running;
978  spin_unlock_irqrestore(&sclp_lock, flags);
979  sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
980  return rc;
981 }
982 
983 static int sclp_undo_suspend(enum sclp_pm_event event)
984 {
985  unsigned long flags;
986  int rc;
987 
988  rc = sclp_reactivate();
989  if (rc)
990  return rc;
991 
992  spin_lock_irqsave(&sclp_lock, flags);
993  sclp_suspend_state = sclp_suspend_state_running;
994  spin_unlock_irqrestore(&sclp_lock, flags);
995 
996  sclp_pm_event(event, 0);
997  return 0;
998 }
999 
1000 static int sclp_thaw(struct device *dev)
1001 {
1002  return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
1003 }
1004 
1005 static int sclp_restore(struct device *dev)
1006 {
1007  return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
1008 }
1009 
1010 static const struct dev_pm_ops sclp_pm_ops = {
1011  .freeze = sclp_freeze,
1012  .thaw = sclp_thaw,
1013  .restore = sclp_restore,
1014 };
1015 
1016 static struct platform_driver sclp_pdrv = {
1017  .driver = {
1018  .name = "sclp",
1019  .owner = THIS_MODULE,
1020  .pm = &sclp_pm_ops,
1021  },
1022 };
1023 
1024 static struct platform_device *sclp_pdev;
1025 
1026 /* Initialize SCLP driver. Return zero if driver is operational, non-zero
1027  * otherwise. */
1028 static int
1029 sclp_init(void)
1030 {
1031  unsigned long flags;
1032  int rc = 0;
1033 
1034  spin_lock_irqsave(&sclp_lock, flags);
1035  /* Check for previous or running initialization */
1036  if (sclp_init_state != sclp_init_state_uninitialized)
1037  goto fail_unlock;
1038  sclp_init_state = sclp_init_state_initializing;
1039  /* Set up variables */
1040  INIT_LIST_HEAD(&sclp_req_queue);
1041  INIT_LIST_HEAD(&sclp_reg_list);
1042  list_add(&sclp_state_change_event.list, &sclp_reg_list);
1043  init_timer(&sclp_request_timer);
1044  /* Check interface */
1045  spin_unlock_irqrestore(&sclp_lock, flags);
1046  rc = sclp_check_interface();
1047  spin_lock_irqsave(&sclp_lock, flags);
1048  if (rc)
1049  goto fail_init_state_uninitialized;
1050  /* Register reboot handler */
1051  rc = register_reboot_notifier(&sclp_reboot_notifier);
1052  if (rc)
1053  goto fail_init_state_uninitialized;
1054  /* Register interrupt handler */
1055  rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
1056  if (rc)
1057  goto fail_unregister_reboot_notifier;
1058  sclp_init_state = sclp_init_state_initialized;
1059  spin_unlock_irqrestore(&sclp_lock, flags);
1060  /* Enable service-signal external interruption - needs to happen with
1061  * IRQs enabled. */
1063  sclp_init_mask(1);
1064  return 0;
1065 
1066 fail_unregister_reboot_notifier:
1067  unregister_reboot_notifier(&sclp_reboot_notifier);
1068 fail_init_state_uninitialized:
1069  sclp_init_state = sclp_init_state_uninitialized;
1070 fail_unlock:
1071  spin_unlock_irqrestore(&sclp_lock, flags);
1072  return rc;
1073 }
1074 
1075 /*
1076  * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
1077  * to print the panic message.
1078  */
1079 static int sclp_panic_notify(struct notifier_block *self,
1080  unsigned long event, void *data)
1081 {
1082  if (sclp_suspend_state == sclp_suspend_state_suspended)
1083  sclp_undo_suspend(SCLP_PM_EVENT_THAW);
1084  return NOTIFY_OK;
1085 }
1086 
1087 static struct notifier_block sclp_on_panic_nb = {
1088  .notifier_call = sclp_panic_notify,
1089  .priority = SCLP_PANIC_PRIO,
1090 };
1091 
1092 static __init int sclp_initcall(void)
1093 {
1094  int rc;
1095 
1096  rc = platform_driver_register(&sclp_pdrv);
1097  if (rc)
1098  return rc;
1099  sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
1100  rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
1101  if (rc)
1102  goto fail_platform_driver_unregister;
1103  rc = atomic_notifier_chain_register(&panic_notifier_list,
1104  &sclp_on_panic_nb);
1105  if (rc)
1106  goto fail_platform_device_unregister;
1107 
1108  return sclp_init();
1109 
1110 fail_platform_device_unregister:
1111  platform_device_unregister(sclp_pdev);
1112 fail_platform_driver_unregister:
1113  platform_driver_unregister(&sclp_pdrv);
1114  return rc;
1115 }
1116 
1117 arch_initcall(sclp_initcall);