#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
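
/*
 * Handle the LCTLG instruction intercept: load 64-bit control registers
 * reg1 through reg3 from guest memory at base2 + disp2.
 */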
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int base2 = vcpu->arch.sie_block->ipb >> 28;
        int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
                    ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
        u64 useraddr;
        int reg, rc;

        vcpu->stat.instruction_lctlg++;
        if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
                return -EOPNOTSUPP;

        useraddr = disp2;
        if (base2)
                useraddr += vcpu->run->s.regs.gprs[base2];

        if (useraddr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                   disp2);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

        reg = reg1;
        do {
                rc = get_guest_u64(vcpu, useraddr,
                                   &vcpu->arch.sie_block->gcr[reg]);
                if (rc == -EFAULT) {
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        break;
                }
                useraddr += 8;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        return 0;
}
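
/*
 * Handle the LCTL instruction intercept: same register walk as handle_lctlg,
 * but only the lower 32 bits of each control register are replaced.
 */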
static int handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int base2 = vcpu->arch.sie_block->ipb >> 28;
        int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
        u64 useraddr;
        u32 val = 0;
        int reg, rc;

        vcpu->stat.instruction_lctl++;

        useraddr = disp2;
        if (base2)
                useraddr += vcpu->run->s.regs.gprs[base2];

        if (useraddr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                   disp2);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

        reg = reg1;
        do {
                rc = get_guest_u32(vcpu, useraddr, &val);
                if (rc == -EFAULT) {
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        break;
                }
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= val;
                useraddr += 4;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        return 0;
}
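
/* Handlers for instruction intercepts, indexed by the first opcode byte. */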
static intercept_handler_t instruction_handlers[256] = {
        [0xb7] = handle_lctl,
        [0xeb] = handle_lctlg,
};
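
/* Intercepts that need no emulation: just account them in the exit stats. */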
static int handle_noop(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case 0x0:
                vcpu->stat.exit_null++;
                break;
        case 0x10:
                vcpu->stat.exit_external_request++;
                break;
        case 0x14:
                vcpu->stat.exit_external_interrupt++;
                break;
        default:
                break;
        }
        return 0;
}
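
/* Stop request: act on the ACTION_*_ON_STOP bits under local_int.lock. */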
static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        vcpu->stat.exit_stop_request++;
        spin_lock_bh(&vcpu->arch.local_int.lock);
        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                atomic_set_mask(CPUSTAT_STOPPED,
                                &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
                /* store status must run unlocked: local_int.lock only
                 * protects local_int.*, not guest memory */
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                if (rc >= 0)
                        rc = -EOPNOTSUPP;
        } else
                spin_unlock_bh(&vcpu->arch.local_int.lock);
        return rc;
}
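
/*
 * Validity intercept: the only case handled in the kernel is an unmapped
 * guest prefix area (viwhy 0x37); anything else is rejected with -EOPNOTSUPP.
 */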
static int handle_validity(struct kvm_vcpu *vcpu)
{
        unsigned long vmaddr, gaddr;
        int viwhy = vcpu->arch.sie_block->ipb >> 16;
        int rc = -EOPNOTSUPP;

        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);

        if (viwhy == 0x37) {
                /* fault in both pages of the guest prefix area */
                for (gaddr = vcpu->arch.sie_block->prefix;
                     gaddr <= vcpu->arch.sie_block->prefix + PAGE_SIZE;
                     gaddr += PAGE_SIZE) {
                        vmaddr = gmap_fault(gaddr, vcpu->arch.gmap);
                        if (IS_ERR_VALUE(vmaddr))
                                return -EOPNOTSUPP;
                        rc = fault_in_pages_writeable((char __user *) vmaddr,
                                                      PAGE_SIZE);
                        if (rc) /* user will receive sigsegv, exit to user */
                                return -EOPNOTSUPP;
                }
        }
        if (rc)
                VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
                           viwhy);
        return rc;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
        handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
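
/* Program interrupt intercept: re-inject the interrupt into the guest. */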
static int handle_prog(struct kvm_vcpu *vcpu)
{
        vcpu->stat.exit_program_interruption++;
        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
        return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}
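
/* Combined instruction and program interrupt intercept (code 0x0C). */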
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
        int rc, rc2;

        vcpu->stat.exit_instr_and_program++;
        rc = handle_instruction(vcpu);
        rc2 = handle_prog(vcpu);

        if (rc == -EOPNOTSUPP)
                /* the instruction was not handled: report a plain
                 * instruction intercept to userspace */
                vcpu->arch.sie_block->icptcode = 0x04;
        if (rc)
                return rc;
        return rc2;
}
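
/* Top-level handlers, indexed by intercept code / 4. */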
static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
};
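
/*
 * Entry point for SIE intercept handling: dispatch on the intercept code,
 * returning -EOPNOTSUPP for intercepts that have no handler here.
 */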
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        if (func)
                return func(vcpu);
        return -EOPNOTSUPP;
}