#include <linux/module.h>
#include <linux/reboot.h>
#include <asm/types.h>

#define SCLP_HEADER		"sclp: "

static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;
/* Number of retry attempts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
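/*
 * Service call: the .insn below encodes the s390 SERVICE CALL (servc)
 * instruction with the command word and the real address of the SCCB;
 * the resulting condition code is returned in cc.
 */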
123 " .insn rre,0xb2200000,%1,%2\n"
126 :
"=&d" (
cc) :
"d" (command),
"a" (
__pa(sccb))
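/* Queue a read event data request; called with sclp_lock held. */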
__sclp_queue_read_req(void)
	__sclp_make_read_req();
	list_add(&sclp_read_req.list, &sclp_req_queue);
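/* (Re)arm the request retry timer; called with sclp_lock held. */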
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
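/*
 * Timer callback for an expired request: queue a read event request or
 * re-arm the timer, then restart request queue processing.
 */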
sclp_request_timeout(unsigned long data)
	__sclp_queue_read_req();
				 sclp_request_timeout, 0);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
				 sclp_request_timeout, 1);
	} else if (rc == -EBUSY) {
				 sclp_request_timeout, 0);
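/* Try to start the requests that are still sitting on the request queue. */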
sclp_process_queue(void)
	spin_unlock_irqrestore(&sclp_lock, flags);
	while (!list_empty(&sclp_req_queue)) {
		rc = __sclp_start_request(req);
				 sclp_request_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
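/*
 * Decide whether a request may be added to the queue; the suspend and
 * init requests are always allowed.
 */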
static int __sclp_can_add_request(struct sclp_req *req)
	if (req == &sclp_suspend_req || req == &sclp_init_req)

	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
	spin_unlock_irqrestore(&sclp_lock, flags);
	     offset += evbuf->length) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		} else if (reg == NULL)
	spin_unlock_irqrestore(&sclp_lock, flags);
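/* Callback for read event data requests: dispatch the received event buffers. */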
sclp_read_cb(struct sclp_req *req, void *data)
	sclp_dispatch_evbufs(sccb);
	spin_unlock_irqrestore(&sclp_lock, flags);

static void __sclp_make_read_req(void)
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
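/* Find the queued request whose SCCB matches the address reported by the interrupt. */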
__sclp_find_req(u32 sccb)

				   unsigned int param32, unsigned long param64)
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	req = __sclp_find_req(finished_sccb);
	spin_unlock(&sclp_lock);
	spin_lock(&sclp_lock);
	__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
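/*
 * Convert an interval in jiffies to TOD clock format; 2^32 TOD clock
 * units correspond to roughly one second, so the result is approximate.
 */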
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
	unsigned long long old_tick;
	unsigned long cr0, cr0_sync;

	if (timer_pending(&sclp_request_timer)) {
		sclp_tod_from_jiffies(sclp_request_timer.expires -
	old_tick = local_tick_disable();
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	if (timer_pending(&sclp_request_timer) &&
		sclp_request_timer.function(sclp_request_timer.data);
	local_tick_enable(old_tick);
sclp_dispatch_state_change(void)
	spin_unlock_irqrestore(&sclp_lock, flags);

	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();

	.receiver_fn = sclp_state_change_cb
	__sclp_get_mask(&receive_mask, &send_mask);
	spin_unlock_irqrestore(&sclp_lock, flags);
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_unlock_irqrestore(&sclp_lock, flags);

	spin_unlock_irqrestore(&sclp_lock, flags);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
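/*
 * Fill sclp_init_sccb and sclp_init_req with a request that announces
 * the given receive and send event masks.
 */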
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
	sccb = (struct init_sccb *) sclp_init_sccb;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
sclp_init_mask(int calculate)
	spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
	__sclp_get_mask(&receive_mask, &send_mask);
	__sclp_make_init_req(receive_mask, send_mask);
	spin_unlock_irqrestore(&sclp_lock, flags);
	    sccb->header.response_code == 0x20) {
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
	spin_unlock_irqrestore(&sclp_lock, flags);

	spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_unlock_irqrestore(&sclp_lock, flags);

	spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_unlock_irqrestore(&sclp_lock, flags);
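/*
 * External interrupt handler used while checking the interface: only an
 * interrupt for sclp_init_sccb is expected, anything else is fatal.
 */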
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
	finished_sccb = param32 & 0xfffffff8;
	if (finished_sccb == 0)
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
	spin_lock(&sclp_lock);
	spin_unlock(&sclp_lock);
sclp_check_timeout(unsigned long data)
	spin_unlock_irqrestore(&sclp_lock, flags);
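/*
 * Check whether the SCLP interface is operational by starting an init
 * request with empty masks and waiting for its completion interrupt.
 */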
sclp_check_interface(void)
	spin_unlock_irqrestore(&sclp_lock, flags);
	__sclp_make_init_req(0, 0);
	sccb = (struct init_sccb *) sclp_init_req.sccb;
				 sclp_check_timeout, 0);
	spin_unlock_irqrestore(&sclp_lock, flags);
	    sccb->header.response_code == 0x20) {
	spin_unlock_irqrestore(&sclp_lock, flags);
	.notifier_call = sclp_reboot_event

	spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
	spin_unlock_irqrestore(&sclp_lock, flags);
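/*
 * Power management freeze callback: queue sclp_suspend_req and wait until
 * the request queue has been flushed (sclp_request_queue_flushed).
 */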
static int sclp_freeze(struct device *dev)
	spin_unlock_irqrestore(&sclp_lock, flags);
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	init_completion(&sclp_request_queue_flushed);
	spin_unlock_irqrestore(&sclp_lock, flags);
static int sclp_undo_suspend(enum sclp_pm_event event)
	spin_unlock_irqrestore(&sclp_lock, flags);

static int sclp_thaw(struct device *dev)

static int sclp_restore(struct device *dev)

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze = sclp_freeze,
	.restore = sclp_restore,
	unsigned long flags;

	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
		goto fail_init_state_uninitialized;
		goto fail_init_state_uninitialized;
		goto fail_unregister_reboot_notifier;
	spin_unlock_irqrestore(&sclp_lock, flags);
fail_unregister_reboot_notifier:
fail_init_state_uninitialized:
	spin_unlock_irqrestore(&sclp_lock, flags);
			     unsigned long event, void *data)

	.notifier_call = sclp_panic_notify,

static __init int sclp_initcall(void)
	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
		goto fail_platform_driver_unregister;
		goto fail_platform_device_unregister;
fail_platform_device_unregister:
fail_platform_driver_unregister: