38 return ((trp->p) && (trp->rid == rid)
49 if (!trp->p || trp->rid != rid)
55 if ((sva > ea1) || (sa1 > eva))
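
A minimal sketch of the range test on line 55 (helper name and types are illustrative, not from the listing): two inclusive address ranges are disjoint exactly when one begins after the other ends, so lines 49-55 report an overlap only when neither condition holds.

	#include <stdint.h>

	/* Sketch of the TR overlap test: [sva, eva] vs. [sa1, ea1]. */
	static int ranges_overlap(uint64_t sva, uint64_t eva,
				  uint64_t sa1, uint64_t ea1)
	{
		if ((sva > ea1) || (sa1 > eva))
			return 0;	/* disjoint ranges */
		return 1;		/* some address lies in both ranges */
	}
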
71 unsigned long stride0, stride1, addr;
80 for (i = 0; i < count0; ++i) {
98 vrr.val = vcpu_get_rr(vcpu, vadr);
99 vpta.val = vcpu_get_pta(vcpu);
101 if (vrr.ve & vpta.ve) {
107 return vpsr.dt && vpsr.it && vpsr.ic;
109 return vpsr.dt && vpsr.rt;
120 pfn_bits = vpta.size - 5 - 8;
123 index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
124 *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
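
A minimal sketch of the hash split on lines 120-124 (the helper name and the reading of "- 5" as a 32-byte hash entry are assumptions): the bucket index mixes the low eight rid bits with the low pfn bits, while the tag keeps the remaining rid and pfn bits so a lookup can verify a hit.

	#include <stdint.h>

	/* Sketch: compute bucket index and collision tag from rid and virtual pfn. */
	static uint64_t vhpt_hash_sketch(uint64_t pta_size, uint64_t rid,
					 uint64_t pfn, uint64_t *tag)
	{
		uint64_t pfn_bits = pta_size - 5 - 8;	/* index bits left for the pfn */

		*tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
		return ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
	}
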
137 rid = vcpu_get_rr(vcpu, va);
142 i < NDTRS; i++, trp++) {
143 if (__is_tr_translated(trp, rid, va))
150 i < NITRS; i++, trp++) {
151 if (__is_tr_translated(trp, rid, va))
170 (ifa & ((1UL << ps) - 1));
176 head->itir = rr.ps << 2;
183 u64 i, dirty_pages = 1;
191 for (i = 0; i < dirty_pages; i++) {
193 if (!test_bit(base_gfn + i, dirty_bitmap))
194 set_bit(base_gfn + i , dirty_bitmap);
207 if (itir_ps(itir) >= mrr.ps) {
208 vhpt_insert(phy_pte, itir, va, pte);
211 psr = ia64_clear_ic();
212 ia64_itc(type, va, phy_pte, itir_ps(itir));
231 if (head->etag == tag)
246 asm volatile ("rsm psr.ic|psr.i;;"
252 "(p7) extr.u r9=r9,0,53;;"
259 :
"=r"(
ret) :
"r"(iha),
"r"(
pte):
"memory");
275 vrr.val = vcpu_get_rr(v, va);
276 psbits = VMX(v, psbits[(va >> 61)]);
277 start = va & ~((1UL << ps) - 1);
280 rr_ps = __ffs(psbits);
281 psbits &= ~(1UL << rr_ps);
282 num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
287 if (cur->etag == tag && cur->ps == rr_ps)
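
A minimal sketch of the probe count set up on lines 280-282 (names are illustrative): for each page size recorded in the region's psbits mask, a purge of size ps touches one bucket per rr_ps-sized page it covers, and at least one. For example, purging ps = 16 (64KB) against entries inserted with rr_ps = 14 (16KB) probes 1UL << 2 = 4 buckets.

	#include <stdint.h>

	/* Sketch: pick the next recorded page size and the number of buckets to probe. */
	static uint64_t purge_probe_count(uint64_t ps, uint64_t psbits, uint64_t *rr_ps)
	{
		*rr_ps = __builtin_ctzl(psbits);	/* lowest set bit, same role as __ffs() */
		return 1UL << ((ps < *rr_ps) ? 0 : (ps - *rr_ps));
	}
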
305 start = va & ~((1UL << ps) - 1);
308 num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
312 if (cur->etag == tag)
336 vrr.val = vcpu_get_rr(v, va);
337 vrr.ps = itir_ps(itir);
338 VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
351 rid = vcpu_get_rr(vcpu, va);
353 end = va + PSIZE(ps);
357 i < NDTRS; i++, trp++) {
358 if (__is_tr_overlap(trp, rid, va, end))
365 i < NITRS; i++, trp++) {
366 if (__is_tr_overlap(trp, rid, va, end))
380 vtlb_purge(v, va, ps);
381 vhpt_purge(v, va, ps);
389 vtlb_purge(v, va, ps);
390 vhpt_purge(v, va, ps);
399 ps_mask = ~((1UL << ps) - 1);
427 vrr.val = vcpu_get_rr(v, ifa);
440 pte &= ~_PAGE_MA_MASK;
441 phy_pte &= ~_PAGE_MA_MASK;
444 vtlb_purge(v, ifa, ps);
445 vhpt_purge(v, ifa, ps);
447 if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
451 if (pte & VTLB_PTE_IO)
455 vhpt_insert(phy_pte, itir, ifa, pte);
459 psr = ia64_clear_ic();
460 ia64_itc(type, ifa, phy_pte, ps);
464 if (!(pte&VTLB_PTE_IO))
479 vtlb = &v->arch.vtlb;
480 vhpt = &v->arch.vhpt;
482 for (i = 0; i < 8; i++)
483 VMX(v, psbits[i]) = 0;
486 for (i = 0; i < vtlb->num; i++) {
495 for (i = 0; i < vhpt->num; i++) {
528 psbits = VMX(v, psbits[(va >> 61)]);
529 vrr.val = vcpu_get_rr(v, va);
532 psbits &= ~(1UL << ps);
535 if (cch->etag == tag && cch->ps == ps)
555 for (i = 0; i < hcb->num; i++) {
571 return *(base + gpfn);
606 gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
617 maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
618 | (gip & (PSIZE(tlb->ps) - 1));
622 pbundle->i64[0] = *vpa++;
623 pbundle->i64[1] = *vpa;
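
A minimal sketch of the translation formed on lines 606 and 617-618 (helper and macro names are assumptions): the PTE ppn field is a 4KB-granule frame number, so dropping its low (ps - 12) bits and shifting up by ps gives the ps-aligned page base, and the low ps bits of the guest ip supply the offset within that page.

	#include <stdint.h>

	#define PSIZE_SKETCH(ps)	(1UL << (ps))

	/* Sketch: rebuild an address from a TLB entry's ppn/ps and the faulting ip. */
	static uint64_t tlb_translate_sketch(uint64_t ppn, uint64_t ps, uint64_t gip)
	{
		return (ppn >> (ps - 12) << ps) | (gip & (PSIZE_SKETCH(ps) - 1));
	}
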
632 ia64_set_pta(v->arch.vhpt.pta.val);