#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/* Return codes for vtop functions */
#define VTOP_SUCCESS		0
#define VTOP_INVALID		-1
#define VTOP_RETRY		-2
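/*
 * Test if a physical address is a valid GRU GSEG address.
 */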
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
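/*
 * Read & clear a TFM.
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 */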
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	/* ... atomically read & clear the fault and done bits into
	 * imap/dmap ... */

	/* Not functionally required but helps performance (required on
	 * the emulator) */
	gru_flush_cache(tfm);
}
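/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. Only works on user addresses.
 */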
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	/* ... pin the page with get_user_pages(), set *paddr from it,
	 * then drop the reference ... */
}
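/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */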
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pmd_t *pmdp;
	pte_t pte;

	/* ... walk the pgd/pud/pmd levels; fail if any entry is missing ... */
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;	/* large page: the pmd is the pte */
	else
		pte = *pte_offset_kernel(pmdp, vaddr);
	/* ... fail unless present (and writable & dirty for writes);
	 * set *paddr from the pfn ... */
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;
}
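/*
 * Convert a user virtual address into a GRU global address (gpa) plus
 * pageshift. Tries the atomic lookup first; falls back to the sleeping
 * lookup when not in atomic context.
 */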
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	/* ... find the vma for vaddr; goto inval if there is none ... */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}
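/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped.
 */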
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}
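/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */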
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}
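/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	Input:
 *		cbk	Address of the user CBR; NULL if not in user context.
 *	Return:
 *		  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active
 *		< 0 = error code
 */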
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/* Prefetch the CBE if doing TLB preloading */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/* A stale TFH may need a cache flush before its status is rechecked */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	/* ... read vaddr/asid/write/indexway from the TFH; bail out if the
	 * TFH is idle or FMM, there is no asid, or an mm range invalidate
	 * is active ... */
	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;
	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr,
		asid, indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;
failnoasid:
	/* No asid. Delay spurious fault and TLB dropin */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;
failupm:
	/* Atomic failure: switch the CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;
failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;
failnoexception:
	/* TFH status did not show an exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev,
		"FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;
failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;
failinval:
	/* All errors (atomic & non-atomic) switch the CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;
failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh;
	int cbrnum, ctxnum;

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	/* ... wake any waiters recorded in dmap, then handle each fault
	 * bit in imap ... */
	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];
		if (!gts)
			continue;	/* spurious interrupt - ignore */

		/* Interrupt context: trylock the mmap_sem. On failure,
		 * retry the fault in user (UPM) context. */
		gts->ustats.fmm_tlbmiss++;
		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}
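/*
 * Interrupt entry points registered with the kernel: one per local GRU
 * chiplet, plus a handler that services the chiplets of cpuless blades.
 */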
irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;	/* blades with cpus are handled locally */
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}
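/*
 * Retry a TLB dropin from user context, waiting until any active range
 * invalidate has completed before each attempt.
 */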
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
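/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */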
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	ucbnum = get_cb_number((void *)cb);	/* sanity-check the cb pointer */
	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	/* ... validate ucbnum and refresh a stale CCH if needed ... */
	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
	gru_unlock_gts(gts);
	return ret;
}
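/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */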
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE is not coherent */
		/* ... copy opc/exopc/ecause/cbrstate/etc. out of the CBE ... */
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
		excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
		excdet.exceptdet1);
	/* ... copy excdet back to the user on success ... */
	return ret;
}
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			/* ... unload gts if it exists and its context lock
			 * can be taken ... */
		}
	}
	return 0;
}
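/*
 * Unload the context identified by req.gseg. A zero gseg unloads all
 * contexts (requires CAP_SYS_ADMIN).
 */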
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);
	return 0;
}
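/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (mainly for testing).
 */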
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);
	return 0;
}
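/*
 * Fetch GSEG statistics. A missing gts means the context has never been
 * used and all statistics are implicitly zero.
 */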
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(req.stats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return 0;
}
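/*
 * Set a context option for a GSEG; allocates the context if it does not
 * yet exist.
 */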
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}
	/* ... apply the requested option, unlock gts, and return ret ... */
}