#include <linux/slab.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <asm/processor.h>
#include <asm/topology.h>
#include <asm/uaccess.h>
#include <asm/sn/io.h>
#include <asm/sn/addrs.h>
static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
static int sn_hwperf_init(void);

#define cnode_possible(n)	((n) < num_cnodes)
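/*
 * sn_hwperf_enum_objects(): enumerate the SAL hwperf objects into a
 * vmalloc'd buffer; *nobj is set from sn_hwperf_obj_cnt and the caller
 * is responsible for freeing the returned buffer.
 */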
	if ((e = sn_hwperf_init()) < 0) {

		printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);

	e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
		0, sz, (u64) objbuf, 0, 0, NULL);

	*nobj = sn_hwperf_obj_cnt;
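/*
 * Parse a geographic location string (e.g. "001c14#0") into rack, bay,
 * slot and slab numbers; the slot field is optional and defaults to 0.
 */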
static int sn_hwperf_location_to_bpos(char *location,
	int *rack, int *bay, int *slot, int *slab)
{
	char type;

	/* first try the old style geoid string, which has no slot field */
	if (sscanf(location, "%03d%c%02d#%d",
		rack, &type, bay, slab) == 4)
		*slot = 0;
	else /* then the bladed form, which includes a slot */
	if (sscanf(location, "%03d%c%02d^%02d#%d",
		rack, &type, bay, slot, slab) != 5)
		return -1;

	return 0;
}
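/*
 * Map a location string to a compute node id by comparing its
 * rack/bay/slot/slab against the geoid of every known cnode; returns -1
 * if no possible cnode matches.
 */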
static int sn_hwperf_geoid_to_cnode(char *location)
{
	int this_rack, this_bay, this_slot, this_slab;

	if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
		return -1;

	for (cnode = 0; cnode < num_cnodes; cnode++) {
		module_id = geo_module(geoid);
		this_slot = geo_slot(geoid);
		this_slab = geo_slab(geoid);
		if (rack == this_rack && bay == this_bay &&
		    slot == this_slot && slab == this_slab) {
	return sn_hwperf_geoid_to_cnode(obj->location);
	for (ordinal=0, p=objs; p != obj; p++) {
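/*
 * Slab names used in the topology output: compute nodes, I/O nodes,
 * routers, and a catch-all for everything else. sn_hwperf_get_slabname()
 * also returns the object's ordinal (its cnode id for nodes and I/O
 * nodes, its position among same-type objects otherwise).
 */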
static const char *slabname_node = "node";
static const char *slabname_ionode = "ionode";
static const char *slabname_router = "router";
static const char *slabname_other = "other";
	const char *slabname = slabname_other;

		slabname = isnode ? slabname_node : slabname_ionode;
		*ordinal = sn_hwperf_obj_to_cnode(obj);

		*ordinal = sn_hwperf_generic_ordinal(obj, objs);

			slabname = slabname_router;
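/*
 * print_pci_topology(): ask SAL (ia64_sn_ioif_get_pci_topology) for an
 * ASCII description of the PCI topology and append it to the seq_file.
 */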
static void print_pci_topology(struct seq_file *s)
{
		e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
	for (i=0; i < nobj; i++, p++) {
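/*
 * sn_hwperf_get_nearest_node_objdata(): for a given node, find the
 * nearest node with CPUs and the nearest node with memory by walking the
 * NUMAlink ports of the node object (and of an attached router if
 * needed), falling back to any node that has what is still missing.
 */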
	if (sn_hwperf_has_cpus(node)) {
		*near_cpu_node = node;

	if (sn_hwperf_has_mem(node)) {
		*near_mem_node = node;

	if (found_cpu && found_mem)

	for (i=0, op=objbuf; i < nobj; i++, op++) {
		if (node == sn_hwperf_obj_to_cnode(op)) {

	BUG_ON(sz > sizeof(ptdata));
	e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,

	for (router=NULL, j=0; j < op->ports; j++) {
		dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);

		c = sn_hwperf_obj_to_cnode(dest);
		if (!found_cpu && sn_hwperf_has_cpus(c)) {

		if (!found_mem && sn_hwperf_has_mem(c)) {

	if (router && (!found_cpu || !found_mem)) {
		BUG_ON(sz > sizeof(ptdata));
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,

		for (j=0; j < router->ports; j++) {
			dest = sn_hwperf_findobj_id(objbuf, nobj,
				ptdata[j].conn_id);
			if (!dest || dest->id == node ||

			c = sn_hwperf_obj_to_cnode(dest);
			if (!found_cpu && sn_hwperf_has_cpus(c)) {

			if (!found_mem && sn_hwperf_has_mem(c)) {

			if (found_cpu && found_mem)

	if (!found_cpu || !found_mem) {
		for (i=0, op=objbuf; i < nobj; i++, op++) {

			c = sn_hwperf_obj_to_cnode(op);
			if (!found_cpu && sn_hwperf_has_cpus(c)) {

			if (!found_mem && sn_hwperf_has_mem(c)) {

			if (found_cpu && found_mem)

	if (!found_cpu || !found_mem)
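/*
 * sn_topology_show(): seq_file ->show routine; emits one line per hwperf
 * object. The first call also prints the header and the partition/SHub
 * summary obtained from ia64_sn_get_sn_info(), followed by the PCI
 * topology.
 */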
static int sn_topology_show(struct seq_file *s, void *d)
{
	const char *slabname;

		seq_printf(s, "# objtype ordinal location partition"
			" [attribute value [, ...]]\n");
		if (ia64_sn_get_sn_info(0,
			&shubtype, &nasid_mask, &nasid_shift, &system_size,
			&sharing_size, &partid, &coher, &region_size))
			BUG();

		for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
			if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))

			"nasid_mask 0x%016llx, "
			"coherency_domain %d, "

			partid, utsname()->nodename,
			shubtype ? "shub2" : "shub1",
			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
			system_size, sharing_size, coher, region_size);

		print_pci_topology(s);
		if (obj->name[i] == ' ')

	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);

		obj->sn_hwp_this_part ? "local" : "shared", obj->name);

		if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt,
			ordinal, &near_mem, &near_cpu) == 0) {
			seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d",

				" freq %luMHz, arch ia64",
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,

		for (ordinal=0, p=objs; p != obj; p++) {

		for (pt = 0; pt < obj->ports; pt++) {
			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {

			if (i >= sn_hwperf_obj_cnt) {
				seq_puts(s, " local endpoint disconnected"
					", protocol unknown\n");

			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)

			seq_printf(s, " endpoint %s-%d, protocol %s\n",
static void *sn_topology_start(struct seq_file *s, loff_t *pos)
{
	if (*pos < sn_hwperf_obj_cnt)
		return (void *)(objs + *pos);
static void *sn_topology_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return sn_topology_start(s, pos);
static void sn_topology_stop(struct seq_file *m, void *v)
{
static const struct seq_operations sn_topology_seq_ops = {
	.start = sn_topology_start,
	.next = sn_topology_next,
	.stop = sn_topology_stop,
	.show = sn_topology_show
};
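/*
 * sn_hwperf_call_sal(): wrapper around ia64_sn_hwperf_op() taking a
 * single info argument so the call can be run on a particular cpu,
 * either via an IPI or by temporarily migrating the current task as in
 * sn_hwperf_op_cpu() below.
 */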
static void sn_hwperf_call_sal(void *info)
{
	r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
		op_info->a->arg, op_info->a->sz,
		(u64) op_info->p, 0, 0, op_info->v0);
		sn_hwperf_call_sal(op_info);

		sn_hwperf_call_sal(op_info);

			save_allowed = current->cpus_allowed;
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			sn_hwperf_call_sal(op_info);
			set_cpus_allowed_ptr(current, &save_allowed);
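/* map a SAL hwperf status code onto a negative errno value */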
static int sn_hwperf_map_err(int hwperf_err)
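/*
 * ioctl entry point for the "sn_hwperf" misc device: copy in the
 * sn_hwperf_ioctl_args block, optionally allocate a kernel bounce buffer
 * of a.sz bytes, then either handle the request here (cpu/node/object
 * lookups) or hand it straight to SAL.
 */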
static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
{
		if (a.sz == sizeof(u64)) {

		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			int cpuobj_index = 0;

			for (i = 0; i < nobj; i++) {
				node = sn_hwperf_obj_to_cnode(objs + i);

					sizeof(cpuobj->name),
		if (a.sz != sizeof(u64) ||

		if (a.sz != sizeof(u64) || i < 0) {

		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
			if (objs[i].id != a.arg) {
				for (i = 0; i < nobj; i++) {
					if (objs[i].id == a.arg)

			*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
		r = sn_hwperf_op_cpu(&op_info);
		if (r)
			r = sn_hwperf_map_err(r);

		/* all other ops are a direct SAL call */
		r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
			a.arg, a.sz, (u64) p, 0, 0, &v0);
		if (r)
			r = sn_hwperf_map_err(r);
	.unlocked_ioctl = sn_hwperf_ioctl,
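/*
 * sn_hwperf_init(): once-only setup. The console nasid is used as the
 * fixed reference node for SAL calls; SAL is asked for its heap size, a
 * vmalloc'd heap is installed for it, and the hwperf object count is
 * read back. On failure the heap is freed and the count reset to 0.
 */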
static int sn_hwperf_init(void)
{
	if (sn_hwperf_salheap) {

	sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();

	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,

	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				 SN_HWPERF_INSTALL_HEAP, 0, v,
				 (u64) sn_hwperf_salheap, 0, 0, NULL);

	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,

	sn_hwperf_obj_cnt = (int)v;

	if (e < 0 && sn_hwperf_salheap) {
		vfree(sn_hwperf_salheap);
		sn_hwperf_salheap = NULL;
		sn_hwperf_obj_cnt = 0;
	if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
		e = seq_open(file, &sn_topology_seq_ops);
	if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
		e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj,
			node, near_mem_node, near_cpu_node);
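/*
 * Boot-time registration of the "sn_hwperf" misc device whose ioctl
 * handler is sn_hwperf_ioctl().
 */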
static int __devinit sn_hwperf_misc_register_init(void)
{
		       "register misc device for \"%s\"\n", sn_hwperf_dev.name);