#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
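/*
 * Is the guest PSW currently masked for external interrupts?
 */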
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
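/*
 * A wait with PER, I/O and external interrupts all masked off in the
 * PSW is a disabled wait: nothing can ever wake the guest up again.
 */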
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
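/*
 * Deliverability check: besides the PSW external mask, each external
 * interrupt type has a subclass-mask bit in guest control register 0:
 * 0x2000 external call, 0x4000 emergency signal, 0x200 service signal
 * (also used for virtio here), 0x800 clock comparator.
 */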
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}
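/*
 * Mark the vcpu as being in enabled wait: set CPUSTAT_WAIT in the SIE
 * control block and note the vcpu in the per-VM idle bitmap, which the
 * floating-interrupt injection path uses to pick a CPU to wake.
 */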
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
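/*
 * Drop all interception requests again; they are re-established for
 * the interrupts that remain undeliverable by
 * __set_intercept_indicator() below.
 */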
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}
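/*
 * The interrupt is pending but not deliverable. If the PSW external
 * mask is off, flag the pending external interrupt in the CPU flags;
 * otherwise the subclass must be masked in CR0, so intercept loads of
 * control register 0 (LCTL_CR0) to notice when the guest opens it.
 */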
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}
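/*
 * Deliver one interrupt by hand: store the interruption code and the
 * old PSW into the guest lowcore and load the matching new PSW.
 * table[] maps the top two bits of the intercepted instruction code
 * (ipa >> 14) to an instruction length code of 2, 4 or 6 bytes.
 */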
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;
	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = copy_to_guest(vcpu,
				   offsetof(struct _lowcore, restart_old_psw),
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     offsetof(struct _lowcore, restart_psw),
				     sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
				   table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			     __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		      get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}
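/*
 * Enabled-wait handling: sleep until an interrupt, a signal or the
 * clock comparator hrtimer wakes the vcpu. The sltime computation
 * converts TOD-clock units to nanoseconds (1 unit = 1/4096 us, so
 * ns = delta * 125 / 512).
 */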
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
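/*
 * Tasklet half of the clock comparator timer: mark the timer as due
 * and wake up the vcpu thread sleeping in the wait handler above.
 */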
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}
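/*
 * The hrtimer callback runs in hardirq context, so it only schedules
 * the tasklet above to do the real wakeup work.
 */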
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
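/*
 * Deliver everything that is deliverable right now, local interrupts
 * first, then the clock comparator, then floating interrupts; anything
 * still blocked just gets its intercept indicator set again.
 */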
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
	     get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
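/*
 * Inject a program check from kernel context (e.g. instruction
 * emulation); it is queued at the head of the local list, so it is
 * delivered before any other pending local interrupt.
 */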
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
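/*
 * Floating (VM-wide) interrupt injection: queue on the per-VM list and
 * kick one vcpu, preferring an idle one from the idle bitmap, else the
 * next vcpu round-robin.
 */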
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			fi->next_rr_cpu %= KVM_MAX_VCPUS;
		} while (kvm->vcpus[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}
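/*
 * Interrupt injection targeted at a single vcpu (KVM_S390_INTERRUPT
 * ioctl path); program checks jump the queue, everything else is
 * appended in order.
 */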
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, &vcpu->arch.sie_block->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}