#define IA64_VHPT_TRANS_VECTOR			0x0000
#define IA64_INST_TLB_VECTOR			0x0400
#define IA64_DATA_TLB_VECTOR			0x0800
#define IA64_ALT_INST_TLB_VECTOR		0x0c00
#define IA64_ALT_DATA_TLB_VECTOR		0x1000
#define IA64_DATA_NESTED_TLB_VECTOR		0x1400
#define IA64_INST_KEY_MISS_VECTOR		0x1800
#define IA64_DATA_KEY_MISS_VECTOR		0x1c00
#define IA64_DIRTY_BIT_VECTOR			0x2000
#define IA64_INST_ACCESS_BIT_VECTOR		0x2400
#define IA64_DATA_ACCESS_BIT_VECTOR		0x2800
#define IA64_BREAK_VECTOR			0x2c00
#define IA64_EXTINT_VECTOR			0x3000
#define IA64_PAGE_NOT_PRESENT_VECTOR		0x5000
#define IA64_KEY_PERMISSION_VECTOR		0x5100
#define IA64_INST_ACCESS_RIGHTS_VECTOR		0x5200
#define IA64_DATA_ACCESS_RIGHTS_VECTOR		0x5300
#define IA64_GENEX_VECTOR			0x5400
#define IA64_DISABLED_FPREG_VECTOR		0x5500
#define IA64_NAT_CONSUMPTION_VECTOR		0x5600
#define IA64_SPECULATION_VECTOR			0x5700
#define IA64_DEBUG_VECTOR			0x5900
#define IA64_UNALIGNED_REF_VECTOR		0x5a00
#define IA64_UNSUPPORTED_DATA_REF_VECTOR	0x5b00
#define IA64_FP_FAULT_VECTOR			0x5c00
#define IA64_FP_TRAP_VECTOR			0x5d00
#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR	0x5e00
#define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000
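/* psr bits that are kept when an interruption is delivered to the guest */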
#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL | \
			IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
			IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT)
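/* break.i immediates used by the guest firmware to request PAL/SAL emulation */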
#define DOMN_PAL_REQUEST 0x110000
#define DOMN_SAL_REQUEST 0x110001
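/* Map an interruption vector number to its offset within the guest IVT. */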
static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
	0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
	0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
	0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
	0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
	0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
	0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
	0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
};
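/*
 * Save the interrupted context into the guest's virtual control
 * registers (ipsr, iip, ifs, iipa) before vectoring into its IVT.
 * Only the register writes are shown; the psr fix-up around them
 * is elided.
 */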
static void collect_interruption(struct kvm_vcpu *vcpu)
{
	/* ... */
	vcpu_set_ipsr(vcpu, vpsr);
	/* ... */
	vcpu_set_iip(vcpu, regs->cr_iip);
	/* ... */
	vcpu_set_ifs(vcpu, vifs);
	/* ... */
	vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
	/* ... */
}
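/*
 * Deliver an interruption to the guest: snapshot the context via
 * collect_interruption(), then branch to the guest IVT entry at
 * iva + vec.
 */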
void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
{
	/* ... */
	regs = vcpu_regs(vcpu);

	/* clear cr.isr.ir (incomplete register frame) */
	pt_isr.val = VMX(vcpu, cr_isr);
	pt_isr.ir = 0;
	VMX(vcpu, cr_isr) = pt_isr.val;

	collect_interruption(vcpu);

	viva = vcpu_get_iva(vcpu);
	regs->cr_iip = viva + vec;
}
/* ... */
	rr.val = vcpu_get_rr(vcpu, ifa);
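/* Set vIFA, vITIR and vIHA on a fault, but only when guest psr.ic is set. */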
void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
		int set_ifa, int set_itir, int set_iha)
{
	/* ... */
	vpsr = VCPU(vcpu, vpsr);
	/* ... */
	if (vpsr & IA64_PSR_IC) {
		if (set_ifa)
			vcpu_set_ifa(vcpu, vadr);
		if (set_itir) {
			value = vcpu_get_itir_on_fault(vcpu, vadr);
			vcpu_set_itir(vcpu, value);
		}
		if (set_iha) {
			/* ... */
			vcpu_set_iha(vcpu, value);
		}
	}
}
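/* Common VHPT translation fault; the instruction- and data-side handlers below both forward here. */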
static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* ... */
}

/* ... */
	_vhpt_fault(vcpu, vadr);
/* ... */
	_vhpt_fault(vcpu, vadr);
static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
		/* ... */)
{
	/* ... */
}

/* ... */
	_nat_consumption_fault(vcpu, 0, REGISTER);
/* ... */
	_nat_consumption_fault(vcpu, vadr, DATA);
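/* Page-not-present fault, reflected into the guest. */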
static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* ... */
}

/* ... */
	__page_not_present(vcpu, vadr);
/* ... */
	__page_not_present(vcpu, vadr);
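/*
 * Emulate an FP fault/trap by handing the faulting bundle to the
 * host FPSWA (floating-point software assist) interface.
 */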
		unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
		/* ... */)
{
	/* ... */
	if (!vmm_fpswa_interface)
		/* ... */

	/* ... */
	ret = (*vmm_fpswa_interface->fpswa)(fp_fault, bundle,
			ipsr, fpsr, isr, pr, ifs, &fp_state);
	unsigned long fault_ip;
	/* ... */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		/* ... */

	/* ... */
	if (!bundle.i64[0] && !bundle.i64[1])
		/* ... */

	/* ... */
			&isr, &regs->pr, &regs->cr_ifs, regs);
	vector = vec2off[vec];
	/* ... */
		panic_vm(vcpu, "Interruption with vector: 0x%lx occurs "
				"with psr.ic = 0\n", vector);
	/* ... */
	} else if (status == -EAGAIN)
		/* ... */
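/*
 * Translate a guest virtual PAL-call argument into a host virtual
 * address the VMM can dereference.
 */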
static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
		unsigned long arg)
{
	/* ... */
	unsigned long gpa, poff;
	/* ... */
	poff = arg & (PSIZE(data->ps) - 1);
	/* ... */
	return (unsigned long)__va(arg);
}
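/* Marshal the guest's PAL-call arguments into the exit record; gr29/gr30 may need address translation depending on the call. */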
static void set_pal_call_data(struct kvm_vcpu *vcpu)
{
	/* ... */
	p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
	/* ... */
	p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
	/* ... */
}
static void get_pal_call_result(struct kvm_vcpu *vcpu)
{
	/* ... */
		panic_vm(vcpu, "Mis-set for exit reason!\n");
	/* ... */
}
static void set_sal_call_data(struct kvm_vcpu *vcpu)
{
	/* ... */
}

static void get_sal_call_result(struct kvm_vcpu *vcpu)
{
	/* ... */
		panic_vm(vcpu, "Mis-set for exit reason!\n");
	/* ... */
}
void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
		unsigned long isr, unsigned long iim)
{
	/* ... */
	if (iim == DOMN_PAL_REQUEST) {
		/* ... */
		set_pal_call_data(v);
		/* ... */
		get_pal_call_result(v);
		/* ... */
	} else if (iim == DOMN_SAL_REQUEST) {
		/* ... */
		set_sal_call_data(v);
		/* ... */
		get_sal_call_result(v);
		/* ... */
	}
	/* ... */
}
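/*
 * Decide whether the highest pending interrupt can be delivered,
 * given what is already in service and the current masking state.
 */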
	int mask, h_pending, h_inservice;
	/* ... */
	h_pending = highest_pending_irq(vcpu);
	/* ... */
	h_inservice = highest_inservice_irq(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	mask = irq_masked(vcpu, h_pending, h_inservice);
	/* ... */
	if (VCPU(vcpu, vhpi))
		/* ... */
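/* Inject an external interrupt; delivery requires guest psr.ic to be set. */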
static void generate_exirq(struct kvm_vcpu *vcpu)
{
	/* ... */
	vpsr = VCPU(vcpu, vpsr);
	/* ... */
	if (!(vpsr & IA64_PSR_IC))
		panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
	/* ... */
}
	threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
	vhpi = VCPU(vcpu, vhpi);
	if (vhpi > threshold) {
		/* ... */
		generate_exirq(vcpu);
	}
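/*
 * On the way back to the guest: fold expired timer state and newly
 * pended interrupts into the vcpu's pending-IRQ bookkeeping.
 */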
	if (VMX(v, timer_check)) {
		VMX(v, timer_check) = 0;
		if (VMX(v, itc_check)) {
			/* ... */
			if (!(VCPU(v, itv) & (1 << 16))) {
				/* ... */
				VMX(v, itc_check) = 0;
			} else {
				v->arch.timer_pending = 1;
			}
			VMX(v, last_itc) = VCPU(v, itm) + 1;
			/* ... */
		}
	}

	if (v->arch.irq_new_pending) {
		v->arch.irq_new_pending = 0;
		VMX(v, irq_check) = 0;
		/* ... */
	}

	if (VMX(v, irq_check)) {
		VMX(v, irq_check) = 0;
		/* ... */
	}
static inline void handle_lds(struct kvm_pt_regs *regs)
{
	/* ... */
}
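/*
 * TLB miss/page fault dispatch: I/O accesses are emulated, hits in the
 * guest TLB are inserted into the machine VHPT, and misses are
 * reflected back to the guest as the appropriate ITLB/DTLB, alternate
 * TLB, VHPT or page-not-present fault.
 */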
	u64 vhpt_adr, gppa, pteval, rr, itir;
	/* ... */
	vpsr = VCPU(v, vpsr);
	misr.val = VMX(v, cr_isr);
	/* ... */
	if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
		/* ... */
	}
	/* ... */
		gppa = (vadr & ((1UL << data->ps) - 1))
			+ (data->ppn >> (data->ps - 12) << data->ps);
		/* ... */
			vcpu_set_isr(v, misr.val);
		/* ... */
	} else if (type == D_TLB) {
		/* ... */
		rr = vcpu_get_rr(v, vadr);
		/* ... */
		if (vpsr & IA64_PSR_IC) {
			vcpu_set_isr(v, misr.val);
			/* ... */
		}
		/* ... */
		vpta.val = vcpu_get_pta(v);
		/* ... */
		if (vpsr & IA64_PSR_IC) {
			vcpu_set_isr(v, misr.val);
			/* ... */
		}
		/* ... */
		} else if (vpsr & IA64_PSR_IC) {
			vcpu_set_isr(v, misr.val);
			/* ... */
		}
		/* ... */
		if (vpsr & IA64_PSR_IC) {
			vcpu_set_isr(v, misr.val);
			/* ... */
		}
		/* ... */
	} else if (type == I_TLB) {
		if (!(vpsr & IA64_PSR_IC))
			/* ... */
		vcpu_set_isr(v, misr.val);
		/* ... */
		vpta.val = vcpu_get_pta(v);
		/* ... */
		vcpu_set_isr(v, misr.val);
		/* ... */
		rr = vcpu_get_rr(v, vadr);
		/* ... */
		vcpu_set_isr(v, misr.val);
		/* ... */
		vcpu_set_isr(v, misr.val);
		/* ... */
	}
	regs = vcpu_regs(vcpu);
	vpsr = VCPU(vcpu, vpsr);
	/* ... */
	VMX(v, timer_check) = 1;
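/*
 * Remote ptc.ga purge: temporarily install the target's region
 * settings (vrr[0]/psbits[0]), purge, then restore the old values.
 */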
static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
{
	u64 oldrid, moldrid, oldpsbits, vaddr;
	/* ... */
	oldrid = VMX(v, vrr[0]);
	VMX(v, vrr[0]) = p->rr;
	oldpsbits = VMX(v, psbits[0]);
	/* ... */
	VMX(v, vrr[0]) = oldrid;
	VMX(v, psbits[0]) = oldpsbits;
	/* ... */
}
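/* Re-arm the machine PTA from the vcpu's VHPT descriptor on resume. */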
static void vcpu_do_resume(struct kvm_vcpu *vcpu)
{
	/* ... */
	ia64_set_pta(vcpu->arch.vhpt.pta.val);
}
static void vmm_sanity_check(struct kvm_vcpu *vcpu)
{
	/* ... */
		panic_vm(vcpu, "Failed to do vmm sanity check, "
			"it may be caused by a crashed vmm!\n\n");
}
static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{
	vmm_sanity_check(vcpu);
	/* ... */
	vcpu_do_resume(vcpu);
	/* ... */
	while (vcpu->arch.ptc_g_count > 0)
		ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
	/* ... */
}

/* ... */
	kvm_do_resume_op(vcpu);
	panic_vm(vcpu,
		"Unexpected interruption occurs in VMM, vector: 0x%lx\n",
		/* ... */);