Go to the documentation of this file.
23 #ifndef __GRUTABLES_H__
24 #define __GRUTABLES_H__
148 #include <linux/wait.h>
/*
 * Global driver limits and identification.
 * GRU_MAX_BLADES / GRU_MAX_GRUS size the per-blade and per-chiplet tables;
 * GRU_CHIPLETS_PER_BLADE is defined elsewhere in this header (not visible
 * in this excerpt).
 */
160 #define GRU_MAX_BLADES MAX_NUMNODES
161 #define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
/* Human-readable driver name and version strings. */
163 #define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
164 #define GRU_DRIVER_VERSION_STR "0.85"
/* Delay, in jiffies, between attempts to assign a GRU context (~20 ms). */
268 #define GRU_ASSIGN_DELAY ((HZ * 20) / 1000)
/* Delay, in jiffies, before a context may be stolen (~200 ms).
 * NOTE(review): the stealing policy itself is not visible in this excerpt. */
274 #define GRU_STEAL_DELAY ((HZ * 200) / 1000)
/*
 * STAT(id): increment driver statistic "id" (an atomic_long member of the
 * global gru_stats struct) when statistics collection is enabled via the
 * OPT_STATS bit in gru_options.
 * NOTE(review): this excerpt is truncated -- the closing "} while (0)" of
 * the do/while wrapper is not visible here.
 */
276 #define STAT(id) do { \
277 if (gru_options & OPT_STATS) \
278 atomic_long_inc(&gru_stats.id); \
/*
 * gru_dbg(): debug tracing. When the driver is built with
 * CONFIG_SGI_GRU_DEBUG and OPT_DPRINT is set in gru_options, print a
 * KERN_DEBUG message tagged with the current CPU and calling function.
 * The final line below is the no-op stub for non-debug builds.
 * NOTE(review): this excerpt is truncated -- the do { } while (0) wrapper
 * close and the #else/#endif between the two definitions are not visible.
 */
281 #ifdef CONFIG_SGI_GRU_DEBUG
282 #define gru_dbg(dev, fmt, x...) \
284 if (gru_options & OPT_DPRINT) \
285 printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
288 #define gru_dbg(x...)
/*
 * ASID (address space identifier) management.
 *
 * MAX_ASID     - largest ASID value handed out before wrapping.
 * VADDR_HI_BIT - number of implemented virtual-address bits; the top
 *                three bits of a user address select its "region".
 *
 * GRUREGION(addr)     - 2-bit region number of user address "addr"
 *                       (bits 63:62 after the 3-bit shift, masked to 0..3).
 * GRUASID(asid, addr) - region-qualified ASID: base asid plus the region
 *                       number of addr.
 */
#define MAX_ASID	0xfffff0
#define VADDR_HI_BIT	64
/* Explicit parentheses: '>>' binds tighter than '&'; make that obvious. */
#define GRUREGION(addr)		(((addr) >> (VADDR_HI_BIT - 3)) & 3)
#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
/*
 * TSID(a, v): thread-section index of user address "a" within vma "v" --
 * the offset from vm_start in units of GRU_GSEG_PAGESIZE.
 * UGRUADDR(gts): the inverse mapping -- user virtual address of the gseg
 * backing gru thread state "gts".
 */
398 #define TSID(a, v) (((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
399 #define UGRUADDR(gts) ((gts)->ts_vma->vm_start + \
400 (gts)->ts_tsid * GRU_GSEG_PAGESIZE)
/*
 * Typed accessors for handle structures on a GRU chiplet, computed from the
 * chiplet's kernel base virtual address (gs_gru_base_vaddr):
 *   TFM - TLB fault map (per cpu),     TFH - TLB fault handle (by index),
 *   TGH - TLB global handle (by index), CBE - control block extended.
 * NOTE(review): get_cbe_by_index is truncated in this excerpt -- its
 * trailing ", (i)))" is not visible here.
 */
485 #define get_tfm_for_cpu(g, c) \
486 ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c)))
487 #define get_tfh_by_index(g, i) \
488 ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i)))
489 #define get_tgh_by_index(g, i) \
490 ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i)))
491 #define get_cbe_by_index(g, i) \
492 ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\
/* Chiplet "c" on blade "b" (gru_base[] is the per-blade state table). */
500 #define get_gru(b, c) (&gru_base[b]->bs_grus[c])
/* Convert a DSR allocation-unit count to bytes. */
503 #define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES)
/*
 * Convert a CBR allocation-unit count to bytes.
 * NOTE(review): the "* 2" factor presumably covers a second handle per CB
 * (e.g. the CBE) -- confirm against the handle layout in this header.
 */
504 #define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2)
/*
 * Map thread-relative CB number "n" to the physical CB number on the
 * chiplet, via the allocation-unit index table ts_cbr_idx[] in "gts".
 */
507 #define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \
508 * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE)
/*
 * GID_TO_GRU(gid): global GRU id -> gru state pointer, guarding against a
 * blade whose gru_base[] slot is not populated.
 * NOTE(review): truncated in this excerpt -- the ":" alternative of the
 * conditional (presumably NULL) is not visible here.
 */
511 #define GID_TO_GRU(gid) \
512 (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \
513 (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \
514 bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \
/*
 * Blade/GRU iteration helpers.
 * NOTE(review): for_each_gru_on_blade is truncated in this excerpt -- its
 * loop-advance expression (presumably "(i)++, (gru)++)") is not visible,
 * so as shown the foreach_gid definition would be spliced into its
 * continuation; no comment is inserted between them for that reason.
 */
/* Iterate "gid" over every GRU id set in bitmap "map". */
518 #define for_each_gru_in_bitmap(gid, map) \
519 for_each_set_bit((gid), (map), GRU_MAX_GRUS)
/* Iterate (gru, i) over every chiplet on blade "nid";
 * foreach_gid iterates over all possible GRU ids in the system. */
522 #define for_each_gru_on_blade(gru, nid, i) \
523 for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \
524 (i) < GRU_CHIPLETS_PER_BLADE; \
528 #define foreach_gid(gid) \
529 for ((gid) = 0; (gid) < gru_max_gids; (gid)++)
/* Iterate over every loaded context (gts) on chiplet "gru"; contexts whose
 * gs_gts[ctxnum] slot is NULL are skipped by the if. */
532 #define for_each_gts_on_gru(gts, gru, ctxnum) \
533 for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \
534 if (((gts) = (gru)->gs_gts[ctxnum]))
/* Iterate "i" over each CB index flagged in TLB fault map "map". */
537 #define for_each_cbr_in_tfm(i, map) \
538 for_each_set_bit((i), (map), GRU_NUM_CBE)
/*
 * Iterate "i" over each CB resource in allocation map "map": "k" walks the
 * set allocation units, "i" the CBs inside each unit.
 */
541 #define for_each_cbr_in_allocation_map(i, map, k) \
542 for_each_set_bit((k), (map), GRU_CBR_AU) \
543 for ((i) = (k)*GRU_CBR_AU_SIZE; \
544 (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
/* Same pattern for DSRs: "k" walks the set allocation units, "i" the
 * cache lines inside each unit. */
547 #define for_each_dsr_in_allocation_map(i, map, k) \
548 for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \
549 for ((i) = (k) * GRU_DSR_AU_CL; \
550 (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
/*
 * Kernel physical / virtual address of the gseg (GRU segment) for context
 * number "ctxnum" on chiplet "gru": each context's gseg sits
 * GRU_GSEG_STRIDE bytes past the chiplet base.
 *
 * Fix: "ctxnum" was expanded unparenthesized, so an expression argument
 * such as (a + b) would bind as a + (b * GRU_GSEG_STRIDE).
 */
#define gseg_physical_address(gru, ctxnum) \
	((gru)->gs_gru_base_paddr + (ctxnum) * GRU_GSEG_STRIDE)
#define gseg_virtual_address(gru, ctxnum) \
	((gru)->gs_gru_base_vaddr + (ctxnum) * GRU_GSEG_STRIDE)
/*
 * Handle-locking helpers.
 * NOTE(review): this excerpt shows only the extracted signatures of the
 * static inline lock primitives plus stray body lines from their
 * lock/unlock wrapper functions (operating on cch/tgh handles). The
 * function bodies -- presumably bit-lock operations on a word inside the
 * handle "h" -- are not visible here, so the code below is not compilable
 * as shown and no behavior is asserted.
 */
565 static inline int __trylock_handle(
void *
h)
570 static inline void __lock_handle(
void *
h)
576 static inline void __unlock_handle(
void *h)
/* Stray wrapper-body fragments (enclosing functions not visible): */
583 return __trylock_handle(cch);
594 __unlock_handle(cch);
604 __unlock_handle(tgh);
/*
 * CPU topology decoding from the physical CPU id (cpu_physical_id()):
 *   UV_MAX_INT_CORES     - table-sizing limit on cores per socket.
 *   uv_cpu_socket_number - socket number (0/1) from physical-id bit 5.
 *   uv_cpu_ht_number     - hyperthread number from physical-id bit 0.
 *   uv_cpu_core_number   - core number rebuilt from physical-id bit 4
 *                          (placed at core bit 2) and bits 2:1 (placed at
 *                          core bits 1:0).
 */
616 #define UV_MAX_INT_CORES 8
617 #define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
618 #define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
619 #define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
620 ((cpu_physical_id(p) >> 1) & 3))
/*
 * Exported interfaces.
 * NOTE(review): the declarations below are extraction fragments -- the
 * function names and leading parameters are missing from this excerpt, so
 * only the visible parameter lists are annotated.
 */
/* vm_operations for GRU gseg mappings (definition elsewhere). */
626 extern const struct vm_operations_struct
gru_vm_ops;
/* Context-allocation parameters: CBR/DSR allocation-unit counts, TLB
 * preload count, option flags, and thread section id. */
662 int cbr_au_count,
int dsr_au_count,
663 unsigned char tlb_preload_count,
int options,
int tsid);
/* CBR allocation: AU count plus an output/allocation bitmap. */
665 int cbr_au_count,
char *cbmap);
/* DSR allocation: AU count plus an output/allocation bitmap. */
667 int dsr_au_count,
char *dsmap);