#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#ifdef CONFIG_HOTPLUG_CPU
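/*
 * With CPU hotplug enabled, a CPU is marked hotpluggable unless it is the
 * current target for Corrected Platform Error Interrupts (CPEI) and CPEI
 * cannot be retargeted to another CPU.
 */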
int __ref arch_register_cpu(int num)
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
void __ref arch_unregister_cpu(int num)
        unregister_cpu(&sysfs_cpus[num].cpu);
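/*
 * Without CONFIG_HOTPLUG_CPU, CPUs are registered once at init and are
 * never unregistered, so the init-only variant below suffices.
 */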
static int __init arch_register_cpu(int num)
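/*
 * Registers every available CPU with sysfs at boot; the panic() below
 * fires if the sysfs_cpus[] array cannot be allocated.
 */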
static int __init topology_init(void)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");
                if ((err = arch_register_cpu(i)))
static const char *cache_types[] = {
static const char *cache_mattrib[] = {
#define LEAF_KOBJECT_PTR(x, y) (&all_cpu_cache_info[x].cache_leaves[y])
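/*
 * SMP: build this_leaf->shared_cpu_map by querying PAL for each logical
 * processor that shares the cache at this level and type.
 */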
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
        int num_shared, i = 0;
                        cpu_data(cpu)->cores_per_socket <= 1) {
        if (ia64_pal_cache_shared_info(this_leaf->level,
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
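/*
 * Non-SMP build: with a single logical CPU there is nothing to share, so
 * the fallback variant can simply record the cache as private to @cpu.
 */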
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
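/*
 * sysfs show handlers: each formats one field of the PAL cache config info
 * (cci) for a cache leaf. Note that pcci_line_size is stored as the log2
 * of the line size in bytes, hence the shifts below.
 */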
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
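/*
 * Number of sets = cache size / associativity / line size; the last
 * division is a shift because pcci_line_size is a log2 value.
 */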
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
        cpumask_and(&shared_cpu_map,
        len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
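/*
 * A unified cache reports cci.pcci_unified set, bumping the index past the
 * split instruction/data entries of cache_types[].
 */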
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
#define define_one_ro(_name) \
        static struct cache_attr _name = \
                __ATTR(_name, 0444, show_##_name, NULL)
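/*
 * Illustrative expansion (not in the original source): define_one_ro(level)
 * produces
 *     static struct cache_attr level =
 *             __ATTR(level, 0444, show_level, NULL);
 * i.e. a read-only attribute file named "level" backed by show_level().
 */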
static struct attribute *cache_default_attrs[] = {
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)
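/*
 * Generic sysfs ->show hook: recover the cache_info and cache_attr from
 * the embedded kobject/attribute via container_of(), then dispatch to the
 * attribute's show method (returning 0 bytes if none is set).
 */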
        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
static const struct sysfs_ops cache_sysfs_ops = {
        .sysfs_ops = &cache_sysfs_ops,
        .default_attrs = cache_default_attrs,
static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops = &cache_sysfs_ops,
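/*
 * Two kobj_types share cache_sysfs_ops: the one above with default_attrs
 * for the per-index leaf directories, and cache_ktype_percpu_entry for the
 * per-CPU "cache" parent directory, which carries no attribute files.
 */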
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
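/*
 * Allocate and fill the per-CPU cache_info leaves: PAL first reports how
 * many cache levels and unique caches exist, then each level is probed for
 * a data/unified cache (j == 2) and an instruction cache (j == 1).
 */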
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
        unsigned long i, levels, unique_caches;
        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
        all_cpu_cache_info[cpu].cache_leaves = this_cache;
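/*
 * cache_add_dev(): called when a CPU comes online. The PAL cache queries
 * above are only meaningful on the CPU being probed, so the caller saves
 * its affinity, migrates onto @cpu around cpu_cache_sysfs_init(), and then
 * restores the original mask.
 */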
        unsigned int cpu = sys_dev->id;
        if (all_cpu_cache_info[cpu].kobj.parent)
        oldmask = current->cpus_allowed;
        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed_ptr(current, &oldmask);
                                     &cache_ktype_percpu_entry, &sys_dev->kobj,
                cpu_cache_sysfs_exit(cpu);
        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                                          &all_cpu_cache_info[cpu].kobj,
                        for (j = 0; j < i; j++) {
                        cpu_cache_sysfs_exit(cpu);
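/*
 * cache_remove_dev(): tears down in the reverse order of cache_add_dev(),
 * dropping each leaf kobject before the per-CPU parent directory.
 */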
        unsigned int cpu = sys_dev->id;
        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
        if (all_cpu_cache_info[cpu].kobj.parent) {
                cpu_cache_sysfs_exit(cpu);
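/*
 * Hotplug notifier: creates or removes the cache sysfs hierarchy as CPUs
 * come online or go away, by dispatching on the notifier action.
 */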
                unsigned long action, void *hcpu)
        unsigned int cpu = (unsigned long)hcpu;
                cache_add_dev(sys_dev);
                cache_remove_dev(sys_dev);
        .notifier_call = cache_cpu_callback
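/*
 * Boot-time setup: registers the cache interface for every CPU already
 * online, then relies on the notifier above for CPUs that appear later.
 */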
static int __init cache_sysfs_init(void)
                cache_add_dev(sys_dev);