26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
33 #include <linux/device.h>
41 #include <asm/uv/uv.h>
46 #include <asm/uv/uv_hub.h>
47 #include <asm/uv/uv_mmrs.h>
/*
 * Driver-wide limits on GRU resources a user context may request:
 * control blocks (CBRs) and data-segment bytes (DSR).  Clamped downward
 * in gru_init_tables() to the smallest value found across all blades,
 * and enforced in gru_create_new_context().
 */
57 static int max_user_cbrs, max_user_dsr_bytes;
/*
 * Fragment of the device mmap handler (the enclosing function header is
 * not visible in this view; presumably gru_file_mmap, wired up via the
 * file_operations .mmap entry below -- TODO confirm against full source).
 * Visible logic:
 *  - the mapping must be both VM_SHARED and VM_WRITE;
 *  - vm_start and vm_end must be GRU_GSEG_PAGESIZE-aligned;
 *  - the vma is then marked as raw PFN-mapped I/O memory, locked, and
 *    excluded from fork-copy, expansion, and core dumps.
 */
104 if ((vma->
vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
107 if (vma->
vm_start & (GRU_GSEG_PAGESIZE - 1) ||
108 vma->
vm_end & (GRU_GSEG_PAGESIZE - 1))
111 vma->
vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
112 VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
/*
 * gru_create_new_context - create a new GRU context for a user request.
 * @arg: ioctl argument; presumably a user-space pointer to the request
 *       struct that is copied into a local "req" -- the copy itself is
 *       not visible in this fragment (TODO confirm against full source).
 *
 * Visible validation: the requested data-segment size must not exceed
 * max_user_dsr_bytes, the control-block count must not exceed
 * max_user_cbrs, and maximum_thread_count must be non-zero.
 * Most of the body (lines 129-137, 139, 141+ of the original) is
 * missing from this view.
 */
128 static int gru_create_new_context(
unsigned long arg)
138 if (
req.data_segment_bytes > max_user_dsr_bytes)
140 if (
req.control_blocks > max_user_cbrs || !
req.maximum_thread_count)
/*
 * gru_get_config_info - report GRU configuration to user space.
 * @arg: ioctl argument (fragment; the copy-out is not visible here).
 *
 * Visible logic: comparing the blade ids of node 1 and node 0 detects
 * whether two nodes share a blade (feeding "nodesperblade", whose
 * assignment is not visible), then info.blades is derived as
 * info.nodes / nodesperblade.
 */
165 static long gru_get_config_info(
unsigned long arg)
171 (uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
177 info.blades =
info.nodes / nodesperblade;
/*
 * gru_file_unlocked_ioctl - ioctl dispatcher for the GRU device file.
 * @file: device file
 * @req:  ioctl request code
 * (third parameter, presumably "unsigned long arg", is cut from view)
 *
 * Logs every request via gru_dbg(), then dispatches on @req; the two
 * visible cases forward to gru_create_new_context() and
 * gru_get_config_info().  The switch statement itself and the remaining
 * cases (original lines 196-222) are missing from this view.
 */
190 static long gru_file_unlocked_ioctl(
struct file *file,
unsigned int req,
195 gru_dbg(
grudev,
"file %p, req 0x%x, 0x%lx\n", file, req, arg);
199 err = gru_create_new_context(arg);
223 err = gru_get_config_info(arg);
/*
 * gru_init_chiplet - initialize the state of one GRU chiplet.
 * @gru:        per-chiplet state struct to initialize
 * @paddr:      physical address of the chiplet's register space
 * @vaddr:      kernel virtual mapping of the same space
 * @blade_id:   blade this chiplet belongs to
 * @chiplet_id: chiplet index within the blade
 *
 * (Fragment.)  The only visible body line checks gru->gs_gid against
 * gru_max_gids; presumably chiplets whose global id is out of range are
 * skipped -- TODO confirm against full source.
 */
236 static void gru_init_chiplet(
struct gru_state *gru,
unsigned long paddr,
237 void *
vaddr,
int blade_id,
int chiplet_id)
251 if (gru->
gs_gid >= gru_max_gids)
/*
 * gru_init_tables - allocate and initialize the per-blade GRU tables.
 * @gru_base_paddr: physical base address of GRU space
 * @gru_base_vaddr: kernel virtual base address of GRU space
 *
 * Visible logic, per possible blade:
 *  - resolve the blade's pnode and its closest memory node, and allocate
 *    the blade structure with alloc_pages_node() on that node (NUMA
 *    locality); the allocation-failure check is not visible here;
 *  - walk the blade's chiplets (gru_base[bid]->bs_grus) and initialize
 *    each via gru_init_chiplet() with its per-chiplet paddr/vaddr;
 *  - track per-blade dsrbytes as a max over chiplets, then clamp the
 *    driver-wide max_user_cbrs / max_user_dsr_bytes to the minimum
 *    across blades.
 * Error path: free the already-allocated blade pages in reverse order.
 * Interior lines (error checks, loop bounds, return values) are missing
 * from this view.
 */
258 static int gru_init_tables(
unsigned long gru_base_paddr,
void *gru_base_vaddr)
261 int cbrs, dsrbytes,
n;
270 for_each_possible_blade(bid) {
271 pnode = uv_blade_to_pnode(bid);
272 nid = uv_blade_to_memory_nid(bid);
273 page = alloc_pages_node(nid,
GFP_KERNEL, order);
284 for (gru = gru_base[bid]->bs_grus, chip = 0;
287 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
288 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
289 gru_init_chiplet(gru, paddr, vaddr, bid, chip);
293 dsrbytes =
max(dsrbytes, n);
295 max_user_cbrs =
min(max_user_cbrs, cbrs);
296 max_user_dsr_bytes =
min(max_user_dsr_bytes, dsrbytes);
302 for (bid--; bid >= 0; bid--)
303 free_pages((
unsigned long)gru_base[bid], order);
/*
 * gru_free_tables - release every per-blade structure allocated by
 * gru_init_tables().  (Fragment: the loop header over blade ids is not
 * visible, only the free_pages() of each gru_base[bid].)
 */
307 static void gru_free_tables(
void)
314 free_pages((
unsigned long)gru_base[bid], order);
/*
 * gru_chiplet_cpu_to_mmr - compute the TLB-interrupt MMR address for a
 * (chiplet, cpu) pair.
 * @chiplet: GRU chiplet number (visible branches handle 0 and 1)
 * @cpu:     cpu whose MMR is wanted
 * @corep:   out-parameter; presumably receives the core/bit index within
 *           the MMR -- TODO confirm against full source.
 *
 * mmr starts at 0; presumably 0 is returned when no MMR applies, which
 * the callers in setup/teardown would treat as "no irq" -- confirm.
 */
317 static unsigned long gru_chiplet_cpu_to_mmr(
int chiplet,
int cpu,
int *corep)
319 unsigned long mmr = 0;
335 }
else if (chiplet == 1) {
/*
 * Fragment of an irq_chip initializer (presumably the gru_chip[] array
 * named in gru_chiplet_setup_tlb_irq below): unmasking a GRU TLB irq is
 * a no-op.
 */
357 .irq_unmask = gru_noop,
/*
 * gru_chiplet_setup_tlb_irq (UV variant) - set up the TLB-flush
 * interrupt for @chiplet on a cpu.  (Fragment: the remaining parameters,
 * the irq request itself, and error handling are not visible.)
 *
 * Visible logic: map the cpu to its interrupt MMR, lazily assign the
 * shared irq_chip's name from @irq_name the first time this chiplet is
 * set up (gru_irq_count[chiplet] == 0), and bump the per-chiplet user
 * count on success.
 */
362 static int gru_chiplet_setup_tlb_irq(
int chiplet,
char *irq_name,
369 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
373 if (gru_irq_count[chiplet] == 0) {
374 gru_chip[chiplet].
name = irq_name;
389 gru_irq_count[chiplet]++;
/*
 * gru_chiplet_teardown_tlb_irq (UV variant) - undo one reference taken
 * by gru_chiplet_setup_tlb_irq().
 * @chiplet: GRU chiplet number
 * @cpu:     cpu whose MMR to resolve
 * @blade:   owning blade
 *
 * Visible logic: return early if nothing was ever set up for this
 * chiplet; otherwise resolve the MMR and, when the per-chiplet use count
 * drops to zero, perform the final teardown (the freeing itself is not
 * visible in this fragment).
 */
394 static void gru_chiplet_teardown_tlb_irq(
int chiplet,
int cpu,
int blade)
399 if (gru_irq_count[chiplet] == 0)
402 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
406 if (--gru_irq_count[chiplet] == 0)
/* Alternate implementations for x86_64 (non-UV-specific irq path). */
410 #elif defined CONFIG_X86_64
/*
 * gru_chiplet_setup_tlb_irq (x86_64 variant) - set up the TLB-flush
 * interrupt for @chiplet.  (Fragment: only the MMR lookup is visible;
 * remaining parameters and the irq request are cut from this view.)
 */
412 static int gru_chiplet_setup_tlb_irq(
int chiplet,
char *irq_name,
419 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
/*
 * gru_chiplet_teardown_tlb_irq (x86_64 variant) - release the TLB-flush
 * interrupt for @chiplet on @cpu/@blade.  (Fragment: only the MMR lookup
 * is visible; the actual irq release is cut from this view.)
 */
441 static void gru_chiplet_teardown_tlb_irq(
int chiplet,
int cpu,
int blade)
446 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
/*
 * gru_teardown_tlb_irqs - free the TLB irqs of both chiplets (0 and 1)
 * driver-wide.  Two visible passes:
 *  1. per cpu (loop header not visible): resolve the cpu's blade and
 *     tear down both chiplets' irqs for it;
 *  2. per possible blade: blades that have possible cpus are presumably
 *     skipped here (a "continue" after the if is not visible -- TODO
 *     confirm); cpu-less blades get a teardown using cpu 0.
 */
458 static void gru_teardown_tlb_irqs(
void)
464 blade = uv_cpu_to_blade_id(cpu);
465 gru_chiplet_teardown_tlb_irq(0, cpu, blade);
466 gru_chiplet_teardown_tlb_irq(1, cpu, blade);
468 for_each_possible_blade(blade) {
469 if (uv_blade_nr_possible_cpus(blade))
471 gru_chiplet_teardown_tlb_irq(0, 0, blade);
472 gru_chiplet_teardown_tlb_irq(1, 0, blade);
/*
 * gru_setup_tlb_irqs - request the TLB irqs of both chiplets driver-wide;
 * mirror of gru_teardown_tlb_irqs().  Two visible passes:
 *  1. per cpu (loop header not visible): set up "GRU0_TLB"/"GRU1_TLB"
 *     with the per-cpu handlers gru0_intr/gru1_intr (the error checks
 *     between the calls are cut from this view);
 *  2. per possible blade: blades with possible cpus are presumably
 *     skipped (continue not visible -- TODO confirm); cpu-less blades
 *     get both chiplets set up on cpu 0 with the gru_intr_mblade handler.
 * On failure, gru_teardown_tlb_irqs() unwinds whatever was requested.
 */
476 static int gru_setup_tlb_irqs(
void)
483 blade = uv_cpu_to_blade_id(cpu);
484 ret = gru_chiplet_setup_tlb_irq(0,
"GRU0_TLB",
gru0_intr, cpu, blade);
488 ret = gru_chiplet_setup_tlb_irq(1,
"GRU1_TLB",
gru1_intr, cpu, blade);
492 for_each_possible_blade(blade) {
493 if (uv_blade_nr_possible_cpus(blade))
495 ret = gru_chiplet_setup_tlb_irq(0,
"GRU0_TLB",
gru_intr_mblade, 0, blade);
499 ret = gru_chiplet_setup_tlb_irq(1,
"GRU1_TLB",
gru_intr_mblade, 0, blade);
507 gru_teardown_tlb_irqs();
/*
 * gru_init - module initialization.
 *
 * Visible steps: an IA64-specific path (under CONFIG_IA64) establishes
 * gru_start_paddr/gru_end_paddr, then the per-blade tables are built
 * with gru_init_tables() and the TLB irqs requested with
 * gru_setup_tlb_irqs().  Device registration and the error-unwind paths
 * (original lines 517-551, 553+) are missing from this view.
 */
516 static int __init gru_init(
void)
523 #if defined CONFIG_IA64
532 gru_start_paddr, gru_end_paddr);
546 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
552 ret = gru_setup_tlb_irqs();
/*
 * gru_exit - module teardown; visible step releases the TLB irqs.
 * (Fragment: device deregistration and gru_free_tables() are presumably
 * also called here -- TODO confirm against full source.)
 */
573 static void __exit gru_exit(
void)
578 gru_teardown_tlb_irqs();
/*
 * Fragments of the operations tables: the device file_operations wire
 * the ioctl dispatcher and mmap handler defined above, and a
 * vm_operations_struct installs gru_vma_close as the vma close callback
 * (its definition is not visible in this view).
 */
587 .unlocked_ioctl = gru_file_unlocked_ioctl,
588 .mmap = gru_file_mmap,
599 .close = gru_vma_close,