25 #include <linux/types.h>
27 #include <asm/processor.h>
29 #include <asm/gcc_intrin.h>
31 #include <asm/pgtable.h>
34 #include "asm-offsets.h"
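/* Rows excerpted from the address-mode switch lookup table consulted by mm_switch_action() below; SW_P2V marks a physical-to-virtual mode transition. */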
60 {0, 0, 0, 0, 0, 0, 0, 0},
61 {0, 0, 0, 0, 0, 0, 0, 0},
71 {0, 0, 0, 0, 0, 0, 0, SW_P2V},
79 {0, 0, 0, 0, 0, 0, 0, 0},
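/* ia64_clear_ic() disables interruption collection (and interrupts) and returns the previous psr so the caller can restore it after the mode switch. */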
105 psr = ia64_clear_ic();
119 psr = ia64_clear_ic();
137 act = mm_switch_action(old_psr, new_psr);
188 if ((old_psr.dt != new_psr.dt)
189 || (old_psr.it != new_psr.it)
190 || (old_psr.rt != new_psr.rt))
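/* Helpers for locating guest registers: RPT() yields the byte offset of a field inside struct kvm_pt_regs, and gr_info[] maps the static general registers r0-r31 to those offsets. */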
231 #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
233 static u16 gr_info[32] = {
245 #define IA64_FIRST_STACKED_GR 32
246 #define IA64_FIRST_ROTATING_FR 32
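/* Registers from r32 up live on the RSE backing store and may be renamed; rotate_reg() folds the rotating-register base (rrb) into a register index within a rotating region of size 'sor'. */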
248 static inline unsigned long
249 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
265 unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
273 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
276 long delta = ia64_rse_slot_num(addr) + num_regs;
282 while (delta <= -0x3f) {
287 while (delta >= 0x3f) {
293 return addr + num_regs + i;
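/* get_rse_reg(): read stacked register 'r1' (and its NaT bit) from the guest's RSE backing store; the slot is located relative to bsp after undoing register rotation. */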
296 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
297 unsigned long *val, int *nat)
299 unsigned long *bsp, *addr, *rnat_addr, *bspstore;
301 unsigned long nat_mask;
302 unsigned long old_rsc, new_rsc;
303 long sof = (regs->cr_ifs) & 0x7f;
304 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
305 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
309 ridx = rotate_reg(sor, rrb_gr, ridx);
312 new_rsc = old_rsc&(~(0x3));
316 bsp = kbs + (regs->loadrs >> 19);
318 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
319 nat_mask = 1UL << ia64_rse_slot_num(addr);
320 rnat_addr = ia64_rse_rnat_addr(addr);
322 if (addr >= bspstore) {
329 if (bspstore < rnat_addr)
333 *nat = (int)!!((*rnat_addr) & nat_mask);
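/* set_rse_reg() is the store-side counterpart: it writes 'val' into the backing-store slot and updates the corresponding bit in the RNaT collection. */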
339 unsigned long val, unsigned long nat)
341 unsigned long *bsp, *bspstore, *addr, *rnat_addr;
343 unsigned long nat_mask;
344 unsigned long old_rsc, new_rsc, psr;
346 long sof = (regs->cr_ifs) & 0x7f;
347 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
348 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
352 ridx = rotate_reg(sor, rrb_gr, ridx);
356 new_rsc = old_rsc & (~0x3fff0003);
358 bsp = kbs + (regs->loadrs >> 19);
360 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
361 nat_mask = 1UL << ia64_rse_slot_num(addr);
362 rnat_addr = ia64_rse_rnat_addr(addr);
366 if (addr >= bspstore) {
373 if (bspstore < rnat_addr)
374 rnat = rnat & (~nat_mask);
376 *rnat_addr = (*rnat_addr)&(~nat_mask);
384 if (bspstore < rnat_addr)
385 rnat = rnat&(~nat_mask);
387 *rnat_addr = (*rnat_addr) & (~nat_mask);
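/* getreg()/setreg() dispatch stacked registers (regnum >= IA64_FIRST_STACKED_GR) to the RSE helpers above, and static ones to their pt_regs slot via gr_info[]; NaT bits for static registers are kept in the unat collection. */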
396 void getreg(unsigned long regnum, unsigned long *val,
399 unsigned long addr, *unat;
401 get_rse_reg(regs, regnum, val, nat);
408 addr = (unsigned long)regs;
411 addr += gr_info[regnum];
413 *val = *(unsigned long *)addr;
418 *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
421 void setreg(unsigned long regnum, unsigned long val,
439 addr = (unsigned long)regs;
445 addr += gr_info[regnum];
447 *(unsigned long *)addr = val;
453 bitmask = 1UL << ((addr >> 3) & 0x3f);
468 getreg(reg, &val, 0, regs);
475 long sof = (regs->cr_ifs) & 0x7f;
481 setreg(reg, value, nat, regs);
490 #define CASE_FIXED_FP(reg) \
492 ia64_stf_spill(fpval, reg); \
637 #define CASE_FIXED_FP(reg) \
639 ia64_ldf_fill(reg, fpval); \
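/* The CASE_FIXED_FP() macros expand to one switch case per fixed floating-point register, spilling (ia64_stf_spill) or filling (ia64_ldf_fill) it through 'fpval'. */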
794 #define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
795 static long kvm_get_itc(struct kvm_vcpu *vcpu)
797 #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
800 if (kvm->arch.is_sn2)
812 unsigned long guest_itc;
813 guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
815 if (guest_itc >= VMX(vcpu, last_itc)) {
816 VMX(vcpu, last_itc) = guest_itc;
819 return VMX(vcpu, last_itc);
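/* Guest ITC = host time source (the SN2 RTC on sn2 platforms, ar.itc otherwise) plus the per-vcpu itc_offset; last_itc keeps the returned value monotonic. */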
822 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
823 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
828 long itc_offset = val - kvm_get_itc(vcpu);
829 unsigned long vitv = VCPU(vcpu, itv);
833 if (kvm_vcpu_is_bsp(vcpu)) {
835 v = (struct kvm_vcpu *)((char *)vcpu +
837 VMX(v, itc_offset) = itc_offset;
838 VMX(v, last_itc) = 0;
841 VMX(vcpu, last_itc) = 0;
842 if (VCPU(vcpu, itm) <= val) {
843 VMX(vcpu, itc_check) = 0;
846 VMX(vcpu, itc_check) = 1;
847 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
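/* Writing the guest ITC recomputes itc_offset (propagated to every vcpu when done on the BSP) and re-evaluates whether an itm timer check is still needed. */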
852 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
857 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
859 unsigned long vitv = VCPU(vcpu, itv);
863 VMX(vcpu, itc_check) = 1;
865 VMX(vcpu, timer_pending) = 0;
867 VMX(vcpu, itc_check) = 0;
870 #define ITV_VECTOR(itv) (itv&0xff)
871 #define ITV_IRQ_MASK(itv) (itv&(1<<16))
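/* itv layout: bits 0-7 hold the timer interrupt vector, bit 16 is the mask bit. */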
873 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
878 vcpu->arch.timer_pending = 0;
882 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
886 vec = highest_inservice_irq(vcpu);
889 VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
891 vcpu->arch.irq_new_pending = 1;
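/* EOI retires the highest in-service interrupt from the insvc[] bitmap and flags irq_new_pending so pending interrupts are re-evaluated. */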
921 if (is_higher_irq(h_pending, h_inservice)) {
922 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
940 vcpu->arch.irq_new_pending = 1;
952 vcpu->arch.irq_new_pending = 1;
970 VCPU(vcpu, vhpi) = vhpi;
973 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
978 int vec, h_inservice, mask;
980 vec = highest_pending_irq(vcpu);
981 h_inservice = highest_inservice_irq(vcpu);
984 if (VCPU(vcpu, vhpi))
992 VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
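/* When an interrupt is accepted its bit is set in insvc[] (to be cleared again by EOI above); vhpi tracks the highest pending interrupt exposed to the guest through the VPD. */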
1007 vpta.val = vcpu_get_pta(vcpu);
1008 vrr.val = vcpu_get_rr(vcpu, vadr);
1009 vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
1012 vpta.val, 0, 0, 0, 0);
1014 pval = (vadr & VRN_MASK) | vhpt_offset |
1015 (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
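/* thash emulation: the VHPT slot offset is derived from the virtual address and the region's page size (vrr.ps), then combined with the region bits and the PTA base/size to form the hash address. */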
1026 vpta.val = vcpu_get_pta(vcpu);
1027 vrr.val = vcpu_get_rr(vcpu, vadr);
1043 vpta.val = vcpu_get_pta(vcpu);
1049 if (!data || !data->p)
1059 unsigned long thash, vadr;
1068 unsigned long tag, vadr;
1082 regs = vcpu_regs(vcpu);
1083 pt_isr.val = VMX(vcpu, cr_isr);
1085 visr.ei = pt_isr.ei;
1086 visr.ir = pt_isr.ir;
1093 vcpu_set_isr(vcpu, visr.val);
1097 vcpu_set_isr(vcpu, visr.val);
1101 *padr = (data->gpaddr >> data->ps << data->ps) |
1102 (vadr & (PSIZE(data->ps) - 1));
1110 vcpu_set_isr(vcpu, visr.val);
1114 vcpu_set_isr(vcpu, visr.val);
1118 *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1119 | (vadr & (PSIZE(data->ps) - 1));
1125 vcpu_set_isr(vcpu, visr.val);
1134 vcpu_set_isr(vcpu, visr.val);
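/* tpa emulation: on a translation hit *padr is rebuilt from the entry's gpaddr/ppn and page size; otherwise an ISR value (inheriting ei/ir from the trapping ISR) is installed and a fault is delivered. */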
1148 unsigned long r1, r3;
1161 unsigned long r1, r3;
1189 rid = vcpu_get_rr(vcpu, ifa);
1192 vcpu_set_tr(p_itr, pte, itir, va, rid);
1210 if (__gpfn_is_io(gpfn))
1212 rid = vcpu_get_rr(vcpu, va);
1287 unsigned long ifa, itir;
1296 unsigned long ifa, itir;
1305 unsigned long ifa, itir;
1314 unsigned long ifa, itir;
1323 unsigned long ifa, itir;
1332 unsigned long itir, ifa, pte, slot;
1336 itir = vcpu_get_itir(vcpu);
1337 ifa = vcpu_get_ifa(vcpu);
1345 unsigned long itir, ifa, pte, slot;
1349 itir = vcpu_get_itir(vcpu);
1350 ifa = vcpu_get_ifa(vcpu);
1356 unsigned long itir, ifa, pte;
1358 itir = vcpu_get_itir(vcpu);
1359 ifa = vcpu_get_ifa(vcpu);
1366 unsigned long itir, ifa, pte;
1368 itir = vcpu_get_itir(vcpu);
1369 ifa = vcpu_get_ifa(vcpu);
1387 vcpu_set_itc(vcpu, imm);
1395 vcpu_set_itc(vcpu, r2);
1427 unsigned long rrval;
1431 oldrr.val = vcpu_get_rr(vcpu, reg);
1435 switch ((unsigned long)(reg >> VRN_SHIFT)) {
1437 vcpu->arch.vmm_rr = vrrtomrr(val);
1444 rrval = vrrtomrr(val);
1445 vcpu->arch.metaphysical_saved_rr4 = rrval;
1450 rrval = vrrtomrr(val);
1451 vcpu->arch.metaphysical_saved_rr0 = rrval;
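/* Guest region-register writes are converted with vrrtomrr(); writes to region 0 and region 4 also refresh the cached metaphysical_saved_rr0/rr4 used while the guest runs in metaphysical addressing mode. */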
1465 unsigned long r3, r2;
1482 unsigned long r3, r2;
1486 vcpu_set_pmc(vcpu, r3, r2);
1491 unsigned long r3, r2;
1495 vcpu_set_pmd(vcpu, r3, r2);
1509 unsigned long r3, r1;
1512 r1 = vcpu_get_rr(vcpu, r3);
1518 unsigned long r3, r1;
1527 unsigned long r3, r1;
1530 r1 = vcpu_get_dbr(vcpu, r3);
1536 unsigned long r3, r1;
1539 r1 = vcpu_get_ibr(vcpu, r3);
1545 unsigned long r3, r1;
1548 r1 = vcpu_get_pmc(vcpu, r3);
1563 unsigned long r3, r1;
1573 vcpu->arch.irq_check = 1;
1585 vcpu_set_dcr(vcpu, r2);
1588 vcpu_set_itm(vcpu, r2);
1594 vcpu_set_eoi(vcpu, r2);
1605 unsigned long tgt = inst.M33.r1;
1635 regs = vcpu_regs(vcpu);
1642 panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
1650 VCPU(vcpu, vpsr) = val
1656 vcpu->arch.irq_check = 1;
1694 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1696 __asm__ __volatile__ ( \
1697 ";;extr.u %0 = %3,%6,16;;\n" \
1698 "dep %1 = %0, %1, 0, 16;;\n" \
1700 "extr.u %0 = %2, 16, 16;;\n" \
1701 "dep %3 = %0, %3, %6, 16;;\n" \
1703 ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
1704 "r"(*runat), "r"(b1unat), "r"(runat), \
1705 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1713 unsigned long *r = &regs->r16;
1714 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1715 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1716 unsigned long *runat = &regs->eml_unat;
1717 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1718 unsigned long *b1unat = &VCPU(vcpu, vnat);
1722 for (i = 0; i < 16; i++) {
1727 VMM_PT_REGS_R16_SLOT);
1728 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1732 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1734 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
1735 "dep %1 = %0, %1, 16, 16;;\n" \
1737 "extr.u %0 = %2, 0, 16;;\n" \
1738 "dep %3 = %0, %3, %6, 16;;\n" \
1740 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
1741 "r"(*runat), "r"(b0unat), "r"(runat), \
1742 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1749 unsigned long *r = &regs->r16;
1750 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1751 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1752 unsigned long *runat = &regs->eml_unat;
1753 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1754 unsigned long *b1unat = &VCPU(vcpu, vnat);
1757 for (i = 0; i < 16; i++) {
1762 VMM_PT_REGS_R16_SLOT);
1772 psr = VCPU(vcpu, ipsr);
1778 ifs = VCPU(vcpu, ifs);
1802 unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1813 unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
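/* The 24-bit immediate of the M44 instruction format is reassembled from its i (bit 23) and i2 (bits 21-22) fields plus the low immediate bits. */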
1826 #define MASK(bit,len) \
1830 __asm __volatile("dep %0=-1, r0, %1, %2"\
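/* MASK(bit, len) builds a mask of 'len' consecutive ones starting at 'bit' by depositing -1 into r0 with the dep instruction. */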
1856 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1864 if (ipsr->ri == 2) {
1876 if (ipsr->ri == 0) {
1897 cause = VMX(vcpu, cause);
1898 opcode = VMX(vcpu, opcode);
2042 VMX(vcpu, vrr[0]) = 0x38;
2043 VMX(vcpu, vrr[1]) = 0x38;
2044 VMX(vcpu, vrr[2]) = 0x38;
2045 VMX(vcpu, vrr[3]) = 0x38;
2046 VMX(vcpu, vrr[4]) = 0x38;
2047 VMX(vcpu, vrr[5]) = 0x38;
2048 VMX(vcpu, vrr[6]) = 0x38;
2049 VMX(vcpu, vrr[7]) = 0x38;
2053 VCPU(vcpu, pta) = 15 << 2;
2054 VCPU(vcpu, itv) = 0x10000;
2055 VCPU(vcpu, itm) = 0;
2056 VMX(vcpu, last_itc) = 0;
2059 VCPU(vcpu, ivr) = 0;
2066 VCPU(vcpu, pmv) = 0x10000;
2067 VCPU(vcpu, cmcv) = 0x10000;
2068 VCPU(vcpu, lrr0) = 0x10000;
2069 VCPU(vcpu, lrr1) = 0x10000;
2073 for (i = 0; i < 4; i++)
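/* Reset defaults: every virtual region register starts at 0x38, and the interval-timer, perfmon, CMC and local-redirection vectors start masked (bit 16, hence 0x10000); itm and last_itc are cleared. */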
2087 vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2088 vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2092 panic_vm(vcpu, "Machine Status conflicts!\n");
2100 vcpu->arch.metaphysical_saved_rr0);
2103 vcpu->arch.metaphysical_saved_rr4);
2107 vrrtomrr(VMX(vcpu, vrr[VRN1])));
2110 vrrtomrr(VMX(vcpu, vrr[VRN2])));
2113 vrrtomrr(VMX(vcpu, vrr[VRN3])));
2116 vrrtomrr(VMX(vcpu, vrr[VRN5])));
2119 vrrtomrr(VMX(vcpu, vrr[VRN7])));
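/* The machine region registers are loaded from the guest's vrr[] values via vrrtomrr(); rr0 and rr4 take the cached metaphysical_saved_rr0/rr4 shadows, depending on the guest's current addressing mode. */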
2141 static void kvm_show_registers(struct kvm_pt_regs *regs)
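/* Diagnostic dump of the guest's kvm_pt_regs (psr/ifs/ip, RSE state, branch, floating-point and general registers), used from the panic path (see the call at line 2204). */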
2147 printk("vcpu 0x%p vcpu %d\n",
2150 printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
2153 printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2155 printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
2157 printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2160 printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
2161 regs->b6, regs->b7);
2162 printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
2163 regs->f6.u.bits[1], regs->f6.u.bits[0],
2164 regs->f7.u.bits[1], regs->f7.u.bits[0]);
2165 printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
2166 regs->f8.u.bits[1], regs->f8.u.bits[0],
2167 regs->f9.u.bits[1], regs->f9.u.bits[0]);
2168 printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2169 regs->f10.u.bits[1], regs->f10.u.bits[0],
2170 regs->f11.u.bits[1], regs->f11.u.bits[0]);
2172 printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
2173 regs->r2, regs->r3);
2174 printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
2175 regs->r9, regs->r10);
2176 printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2178 printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2180 printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2182 printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2184 printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2186 printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2188 printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2204 kvm_show_registers(regs);