static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
	for (i = 0; i < clflush_count; ++i) {
static void psb_pages_clflush(struct psb_mmu_driver *driver,
			      struct page *page[], unsigned long num_pages)
		psb_page_clflush(driver, *page++);
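/*
 * Presumably the enclosing loop (elided here) walks the page array once
 * per entry, flushing each page's cache lines via psb_page_clflush().
 */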
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
				    int force)
	psb_mmu_flush_pd_locked(driver, force);
		psb_pages_clflush(pd->driver, &pd->p, 1);
	psb_mmu_flush_pd_locked(pd->driver, 1);
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}
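/*
 * Worked example (assuming the usual 4 MiB PDE span: 1024 PTEs of 4 KiB
 * each, PSB_PDE_MASK = 0x003FFFFF): for addr = 0x00500000 and
 * end = 0x01000000 the helper returns 0x00800000, the next
 * page-directory boundary, so a caller's outer loop handles exactly one
 * page table per iteration. Once the rounded-up boundary would pass end,
 * end itself is returned and the walk terminates.
 */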
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
	if (!trap_pagefaults) {
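/*
 * When pagefaults are not trapped, the driver appears to point the
 * invalid PDE/PTE values at dummy scratch pages instead, so stray GPU
 * accesses hit harmless memory rather than faulting.
 */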
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
	psb_mmu_flush_pd_locked(driver, 1);
	for (i = 0; i < 1024; ++i) {	/* one iteration per page-directory entry */
	for (i = 0; i < clflush_count; ++i) {
		pt = psb_mmu_alloc_pt(pd);
		psb_mmu_clflush(pd->driver, (void *)&v[index]);
	uint32_t index = psb_mmu_pd_index(addr);
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
	if (pt->count == 0) {
		psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
		spin_unlock(&pd->driver->lock);
		spin_unlock(&pd->driver->lock);
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
				   unsigned long addr, uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
	int num_pages = gtt_pages;
	spin_lock(&driver->lock);
	while (gtt_pages--) {
		*v++ = gtt_start | pd->pd_mask;
	psb_pages_clflush(pd->driver, &pd->p, num_pages);
	spin_unlock(&driver->lock);
	psb_mmu_flush_pd(pd->driver, 0);
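/*
 * This section appears to mirror the GTT into the MMU: each directory
 * entry is pointed at a GTT page (gtt_start | pd->pd_mask), the dirty
 * directory cache lines are flushed, and psb_mmu_flush_pd() finally
 * makes the hardware reload the directory.
 */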
	cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
	clflush_size = ((misc >> 8) & 0xff) * 8;
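/*
 * CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, counted
 * in 8-byte quadwords, so a typical raw value of 8 means 8 * 8 = 64-byte
 * cache lines. A hypothetical user-space probe of the same field, using
 * GCC's <cpuid.h> (not driver code):
 *
 *	#include <cpuid.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int eax, ebx, ecx, edx;
 *
 *		if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
 *			printf("clflush line size: %u bytes\n",
 *			       ((ebx >> 8) & 0xff) * 8);
 *		return 0;
 *	}
 */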
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
			       unsigned long address, uint32_t num_pages,
			       uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;
	if (!pd->driver->has_clflush) {
		psb_pages_clflush(pd->driver, &pd->p, num_pages);
	rows = num_pages / desired_tile_stride;
	for (i = 0; i < rows; ++i) {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
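/*
 * The walk idiom used throughout this file: the outer loop advances one
 * page-directory span at a time (psb_pd_addr_end() clamps to the next PD
 * boundary or to end), and the inner loop steps within that span, with
 * the comma operator updating addr before each test. Skeleton
 * (illustrative only):
 *
 *	addr = start;
 *	end = start + (num_pages << PAGE_SHIFT);
 *	do {
 *		next = psb_pd_addr_end(addr, end);
 *		... map and lock the page table covering addr ...
 *		do {
 *			... touch one PTE ...
 *		} while (addr += PAGE_SIZE, addr < next);
 *		... unmap and unlock ...
 *	} while (addr = next, next != end);
 */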
int psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				unsigned long address, uint32_t num_pages)
	unsigned long f_address = address;
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			psb_mmu_invalidate_pte(pt, addr);
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);
	} while (addr = next, next != end);
	psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
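/*
 * f_address preserves the caller's start address because the walk
 * mutates addr; tile strides of 1, 1 make psb_mmu_flush_ptes() treat the
 * range as a single untiled, linear run.
 */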
	unsigned long row_add;
	unsigned long f_address = address;
	rows = num_pages / desired_tile_stride;
	for (i = 0; i < rows; ++i) {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
				psb_mmu_invalidate_pte(pt, addr);
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
	psb_mmu_flush_ptes(pd, f_address, num_pages,
			   desired_tile_stride, hw_tile_stride);
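/*
 * Tiled variant: num_pages is split into rows of desired_tile_stride
 * pages, while the address presumably advances by hw_tile_stride pages
 * (row_add) between rows, matching the hardware tiling layout.
 */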
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
	unsigned long f_address = address;
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);
	} while (addr = next, next != end);
	psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
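/*
 * psb_mmu_insert_pfn_sequence() maps num_pages consecutive page frames
 * starting at start_pfn; psb_mmu_mask_pte() folds the caching/type bits
 * for the given type into each entry before it is stored.
 */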
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride,
			 uint32_t hw_tile_stride, int type)
	unsigned long row_add;
	unsigned long f_address = address;
	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
		rows = num_pages / desired_tile_stride;
	for (i = 0; i < rows; ++i) {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
				psb_mmu_set_pte(pt, addr, pte);
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
	psb_mmu_flush_ptes(pd, f_address, num_pages,
			   desired_tile_stride, hw_tile_stride);
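/*
 * Same walk as the PFN-sequence case, but the PTE here is presumably
 * built from the next struct page in the caller's array rather than from
 * a contiguous PFN counter, with optional hardware tiling applied.
 */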
	pt = psb_mmu_pt_map_lock(pd, virtual);
		tmp = v[psb_mmu_pd_index(virtual)];
		tmp = pt->v[psb_mmu_pt_index(virtual)];
	psb_mmu_pt_unmap_unlock(pt);
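/*
 * Virtual-to-physical lookup: the PDE covering the virtual address is
 * read first (v[psb_mmu_pd_index(virtual)]); if it is valid, the
 * corresponding page table is mapped and locked, and the PTE
 * (pt->v[psb_mmu_pt_index(virtual)]) yields the physical page before the
 * table is unmapped and unlocked again.
 */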