#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/pci.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sn/addrs.h>

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;

static long vp_env_info;
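/*
 * kvm_get_itc: read the time source used for guest time. On SGI SN2
 * (and generic kernels running on SN2) the ar.itc cycle counter is not
 * synchronized across nodes, so the platform RTC is presumably used
 * instead of ar.itc.
 */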
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (vcpu->kvm->arch.is_sn2)
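/*
 * Flush the instruction cache for [start, start + len) in 32-byte
 * steps; used after the VMM image has been copied into its relocated
 * home (see kvm_relocate_vmm() below) so the new text is visible to
 * the instruction fetch side.
 */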
static void kvm_flush_icache(unsigned long start, unsigned long len)
	for (l = 0; l < (len + 32); l += 32)

static void kvm_flush_tlb_all(void)
	unsigned long i, j, count0, count1, stride0, stride1, addr;

	for (i = 0; i < count0; ++i) {

	unsigned long saved_psr;
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
	spin_unlock(&vp_lock);
	kvm_vsa_base = tmp_base;
	spin_unlock(&vp_lock);

	unsigned long saved_psr;
	status = ia64_pal_vp_exit_env(host_iva);

	kvm_run->hw.hardware_exit_reason = 1;
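/*
 * Pass a guest MMIO access up to userspace for emulation: the access is
 * described in kvm_run->mmio and mirrored in vcpu->mmio_fragments[0].
 * p->dir is nonzero for a read, hence !p->dir marks a write.
 */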
	p = kvm_get_vcpu_ioreq(vcpu);

	vcpu->mmio_needed = 1;
	vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	p = kvm_get_exit_data(vcpu);
	kvm_run->hw.hardware_exit_reason = 2;

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	p = kvm_get_exit_data(vcpu);
	kvm_run->hw.hardware_exit_reason = 3;

	vcpu->arch.irq_new_pending = 1;
	__apic_accept_irq(vcpu, vector);
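/*
 * lid_to_vcpu: map a (local ID, EID) pair taken from an IPI destination
 * to the matching vcpu; handle_ipi uses it below to find the target of
 * a guest inter-processor interrupt.
 */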
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
		if (lid.id == id && lid.eid == eid)

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		if (waitqueue_active(&target_vcpu->wq))
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
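/*
 * A ptc.g (global TLB purge) issued by one vcpu has to be seen by all
 * of them. vcpu_global_purge() queues the purge parameters in the
 * current vcpu's ptc_g_data[] buffer; when the buffer is exhausted the
 * count is reset and, presumably, a full TLB flush is requested
 * instead. handle_global_purge() fans the request out to the other
 * vcpus.
 */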
static void vcpu_global_purge(void *info)
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
		vcpu->arch.ptc_g_count = 0;

static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	struct kvm *kvm = vcpu->kvm;

		if (waitqueue_active(&vcpui->wq))

		if (vcpui->cpu != -1) {

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
	unsigned long pte, rtc_phys_addr, map_addr;
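/*
 * Guest HLT handling: with the in-kernel irqchip, the distance from the
 * current guest ITC to the next timer match (vpd->itm) is converted to
 * microseconds and armed on the per-vcpu halt hrtimer; arch.ht_active
 * tracks whether that timer is pending.
 */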
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
			vcpu->arch.timer_check = 1;

		itc_diff = vpd->itm - vcpu_now_itc;
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);
		vcpu->arch.ht_active = 1;
		vcpu->arch.ht_active = 0;

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
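/*
 * Exit dispatch table, indexed by the exit reason reported by the VMM;
 * kvm_handle_exit() below bounds-checks the reason against
 * kvm_vti_max_exit_handlers before calling through it.
 */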
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {

static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

	p_exit_data = kvm_get_exit_data(vcpu);

static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);

static inline void vti_set_rr6(unsigned long rr6)
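/*
 * kvm_insert_vmm_mapping / kvm_purge_vmm_mapping: pin the VMM text and
 * the per-VM data area in translation registers before switching into
 * the VMM, remembering the returned TR slots (vmm_tr_slot, vm_tr_slot)
 * so the mappings can be dropped again afterwards. On SN2 an extra RTC
 * mapping is set up via kvm_sn2_setup_mappings().
 */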
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
	struct kvm *kvm = vcpu->kvm;

	vcpu->arch.vmm_tr_slot = r;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
	struct kvm *kvm = vcpu->kvm;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2)

static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;

	vti_set_rr6(vcpu->arch.vmm_rr);
	r = kvm_insert_vmm_mapping(vcpu);

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
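/*
 * __vcpu_run: the world switch. Pre-transition installs the VMM region
 * register and mappings, the SRCU read lock is dropped around the
 * actual transfer into the VMM, and post-transition restores the host
 * rr6 and purges the mappings again; the resulting exit is then fed to
 * kvm_handle_exit().
 */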
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	union context *host_ctx, *guest_ctx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	kvm_vcpu_post_transition(vcpu);

	vcpu->arch.launched = 1;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	r = kvm_handle_exit(kvm_run, vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	idx = srcu_read_lock(&vcpu->kvm->srcu);

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
	if (!vcpu->mmio_is_write)

	if (vcpu->mmio_needed) {
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

	r = __vcpu_run(vcpu, kvm_run);

	kvm = (struct kvm *)(vm_base +
	kvm->arch.vm_base = vm_base;
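/*
 * kvm_build_io_pmt: walk the io_ranges[] table and mark the legacy I/O
 * regions in the VM's page-map table with their I/O type so that guest
 * accesses to them are not treated as ordinary RAM.
 */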
static void kvm_build_io_pmt(struct kvm *kvm)
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
		for (j = io_ranges[i].start;
					io_ranges[i].type, 0);

#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
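/*
 * RESTORE_REGS copies one field of the userspace-supplied struct
 * kvm_regs into vcpu->arch (the SAVE_REGS macro further down does the
 * opposite for the get-regs path).
 */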
#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x

	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	for (i = 0; i < 16; i++) {
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	for (i = 0; i < 8; i++) {
	for (i = 0; i < 4; i++)

	vcpu->arch.irq_new_pending = 1;

	if (!irqchip_in_kernel(kvm))

		unsigned int ioctl, unsigned long arg)

		kvm_userspace_mem.slot = kvm_mem.slot;
					&kvm_userspace_mem, 0);

		if (!irqchip_in_kernel(kvm))
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (!irqchip_in_kernel(kvm))
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
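/*
 * kvm_alloc_vmm_area: reserve a contiguous region for the relocated VMM
 * image; the buffer handed to the PAL VP calls sits VMM_SIZE above the
 * base (kvm_vm_buffer).
 */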
static int kvm_alloc_vmm_area(void)
	kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
			kvm_vmm_base, kvm_vm_buffer);

static void kvm_free_vmm_area(void)

static int vti_init_vpd(struct kvm_vcpu *vcpu)
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
		return PTR_ERR(vpd);

	for (i = 0; i < 5; i++)

	cpuid3.value = vpd->vcpuid[3];
	vpd->vcpuid[3] = cpuid3.value;

static int vti_create_vp(struct kvm_vcpu *vcpu)
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

static void init_ptce_info(struct kvm_vcpu *vcpu)
	ia64_get_ptce(&ptce);

	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
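/*
 * kvm_migrate_hlt_timer moves the halt hrtimer along with the vcpu when
 * it is loaded on a different physical CPU; the timer's expiry handler
 * wakes the halted vcpu and sets arch.timer_fired / arch.timer_check.
 */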
static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)

	if (waitqueue_active(q))

	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;

#define PALE_RESET_ENTRY	0x80000000ffffffb0UL

	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);

	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (kvm_vcpu_is_bsp(vcpu)) {
		itc_offset = 0UL - kvm_get_itc(vcpu);
			v = (struct kvm_vcpu *)((char *)vcpu +
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;

	if (!vcpu->arch.apic)

	vcpu->arch.apic->vcpu = vcpu;
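/*
 * Initial register context for the shadow (VMM-side) vcpu. Assuming the
 * usual ia64 numbering: gr[13] (the thread pointer) is aimed at the
 * shadow vcpu, ar[18] (ar.bspstore) at the RSE backing store just past
 * it, ar[36]/ar[19]/ar[64] (unat/rnat/pfs) are cleared, cr[0]/cr[8]
 * hold the DCR and PTA values, the region registers get the VMM's
 * rid/page-size encoding, and br[0] is loaded from the first word of
 * the vmm_entry function descriptor.
 */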
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[36] = 0x0;
	p_ctx->ar[19] = 0x0;
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
		((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0;
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[8] = 0x3c;
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);

	init_ptce_info(vcpu);
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
	r = kvm_insert_vmm_mapping(vcpu);
	r = vti_init_vpd(vcpu);
	r = vti_create_vp(vcpu);
	kvm_purge_vmm_mapping(vcpu);

	unsigned long vm_base = kvm->arch.vm_base;
				vcpu_data[id].vcpu_struct));
	r = vti_vcpu_setup(vcpu, id);

	unsigned long vm_base = kvm->arch.vm_base;

static void kvm_release_vm_pages(struct kvm *kvm)
		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	kvm_release_vm_pages(kvm);

	if (cpu != vcpu->cpu) {
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);

#define SAVE_REGS(_x) regs->_x = vcpu->arch._x

	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	for (i = 0; i < 16; i++) {
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vpr = vpd->vpr;
	for (i = 0; i < 8; i++) {
	for (i = 0; i < 4; i++)

	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);

	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;

		unsigned int ioctl, unsigned long arg)
	void __user *first_p = argp;
			"Illegal user destination address for stack\n");
	void __user *first_p = argp;
			"Illegal user address for stack\n");

	return VM_FAULT_SIGBUS;

	int npages = memslot->npages;
	unsigned long base_gfn = memslot->base_gfn;

	for (i = 0; i < npages; i++) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
			kvm_set_pmt_entry(kvm, base_gfn + i,
			memslot->rmap[i] = 0;

		unsigned int ioctl, unsigned long arg)

static int vti_cpu_has_kvm_support(void)
	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
			"vm_env_info:0x%lx\n", vp_env_info);

static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
	unsigned long new_ar, new_ar_sn2;
	unsigned long module_base;

	module_base = (unsigned long)module->module_core;

	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;

	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
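/*
 * kvm_relocate_vmm: copy the VMM module image into the reserved
 * kvm_vmm_base area, apply the SN2 patch, flush the i-cache, and then
 * rebuild the function descriptors for vmm_entry and tramp_entry so
 * that they point into the relocated copy instead of the module.
 */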
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
			struct module *module)
	unsigned long module_base;
	unsigned long vmm_size;
	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");

	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_patch_vmm(vmm_info, module);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	vmm_offset = vmm_info->vmm_ivt - module_base;

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);

	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);

	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {

	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);

	if (kvm_alloc_vmm_area())

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);

	kvm_free_vmm_area();
	kfree(kvm_vmm_info);

	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
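/*
 * kvm_ia64_sync_dirty_log: merge the dirty-page log that the VMM keeps
 * in the shared per-VM data area into the memslot's dirty bitmap,
 * clearing the source words as they are consumed; arch.dirty_log_lock
 * serializes against the VMM side.
 */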
static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +

	n = kvm_dirty_bitmap_bytes(memslot);

	spin_lock(&kvm->arch.dirty_log_lock);
	for (i = 0; i < n/sizeof(long); ++i) {
		dirty_bitmap[base + i] = 0;
	spin_unlock(&kvm->arch.dirty_log_lock);

	kvm_ia64_sync_dirty_log(kvm, memslot);

	n = kvm_dirty_bitmap_bytes(memslot);

	return __apic_accept_irq(vcpu, irq->vector);

	return apic->vcpu->vcpu_id == dest;

	return vcpu1->arch.xtp - vcpu2->arch.xtp;

	return (dest_mode == 0) ?
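/*
 * find_highest_bits: scan an array of eight 32-bit words from the top
 * and return the index of the highest set bit; used on the VPD irr[]
 * below to pick the highest pending interrupt vector.
 */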
static int find_highest_bits(int *dat)
	for (i = 7; i >= 0 ; i--) {
			return i * 32 + bitnum - 1;

	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	return find_highest_bits((int *)&vpd->irr[0]);

	return vcpu->arch.timer_fired;
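/*
 * vcpu_reset: pin the VMM mapping, clear arch.launched and reinitialize
 * the vcpu state, then purge the mapping again; used to bring a vcpu
 * back to its pristine boot state (see the vcpu_reset() call at the end
 * of this excerpt).
 */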
static int vcpu_reset(struct kvm_vcpu *vcpu)
	r = kvm_insert_vmm_mapping(vcpu);

	vcpu->arch.launched = 0;

	kvm_purge_vmm_mapping(vcpu);

	r = vcpu_reset(vcpu);