#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
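
/* Support structures for the code below. The field layout here is
 * inferred from how the rest of this file uses these types; treat it
 * as a reconstruction rather than a verbatim copy of the original
 * declarations.
 */

/* per-cpu object tracking the "cache" kobject plus the list of
 * "index" subdirectories representing the CPU's local cache hierarchy */
struct cache_dir {
        struct kobject *kobj;          /* bare kobject for the cache directory */
        struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: one per cache associated with the CPU; lifetime is
 * managed via the embedded kobject */
struct cache_index_dir {
        struct kobject kobj;
        struct cache_index_dir *next; /* next index in parent directory */
        struct cache *cache;
};

/* cache object: one per distinct cache in the system; split (Harvard)
 * caches get one object each for instruction and data, both referring
 * to the same OF node */
struct cache {
        struct device_node *ofnode;    /* OF node for this cache, may be cpu node */
        struct cpumask shared_cpu_map; /* online CPUs using this cache */
        int type;                      /* split cache disambiguation */
        int level;                     /* level is not explicit in the device tree */
        struct list_head list;         /* membership in the global cache list */
        struct cache *next_local;      /* next cache of >= level in this CPU's chain */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
        return container_of(k, struct cache_index_dir, kobj);
}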
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2
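
/* Template for determining which device-tree properties to query for
 * a given cache type. The field types are inferred from the
 * initializers in the array that follows.
 */
struct cache_type_info {
        const char *name;
        const char *size_prop;
        /* Allow for both [di]-cache-line-size and
         * [di]-cache-block-size properties; -line-size is preferred,
         * see cache_get_line_size(). */
        const char *line_size_props[2];
        const char *nr_sets_prop;
};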
static const struct cache_type_info cache_type_info[] = {
        {
                /* PowerPC Processor binding says the [di]-cache-*
                 * properties must be equal on unified caches, so just
                 * use the d-cache properties. */
                .name            = "Unified",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
        {
                .name            = "Instruction",
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        },
        {
                .name            = "Data",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};
static const char *cache_type_string(const struct cache *cache)
{
        return cache_type_info[cache->type].name;
}
static void __cpuinit cache_init(struct cache *cache, int type, int level,
                                 struct device_node *ofnode)
{
        cache->type = type;
        cache->level = level;
        cache->ofnode = of_node_get(ofnode);
        INIT_LIST_HEAD(&cache->list);
        list_add(&cache->list, &cache_list);
}
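
/* Allocator used by the devnode instantiation paths below; paired
 * with release_cache(). Reconstructed from the call sites -- a sketch
 * of the helper this file relies on.
 */
static struct cache *__cpuinit new_cache(int type, int level,
                                         struct device_node *ofnode)
{
        struct cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (cache)
                cache_init(cache, type, level, ofnode);

        return cache;
}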
static void release_cache_debugcheck(struct cache *cache)
{
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list)
                WARN_ONCE(iter->next_local == cache,
                          "cache for %s(%s) refers to cache for %s(%s)\n",
                          iter->ofnode->full_name,
                          cache_type_string(iter),
                          cache->ofnode->full_name,
                          cache_type_string(cache));
}
static void release_cache(struct cache *cache)
{
        if (!cache)
                return;

        pr_debug("freeing L%d %s cache for %s\n", cache->level,
                 cache_type_string(cache), cache->ofnode->full_name);

        release_cache_debugcheck(cache);
        list_del(&cache->list);
        of_node_put(cache->ofnode);
        kfree(cache);
}
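
/* Note the pairing: cache_init() takes a reference on the OF node via
 * of_node_get(), and release_cache() drops it, so the node's refcount
 * stays elevated for the lifetime of the cache object. */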
static void cache_cpu_set(struct cache *cache, int cpu)
{
        struct cache *next = cache;

        /* mark this CPU in every cache in its local chain */
        while (next) {
                WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
                          "CPU %i already accounted in %s(%s)\n",
                          cpu, next->ofnode->full_name,
                          cache_type_string(next));
                cpumask_set_cpu(cpu, &next->shared_cpu_map);
                next = next->next_local;
        }
}
static int cache_size(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const u32 *cache_size;

        propname = cache_type_info[cache->type].size_prop;

        cache_size = of_get_property(cache->ofnode, propname, NULL);
        if (!cache_size)
                return -ENODEV;

        *ret = *cache_size;
        return 0;
}
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
        unsigned int size;

        if (cache_size(cache, &size))
                return -ENODEV;

        *ret = size / 1024;
        return 0;
}
/* not cache block size, that's the insn/data granule size */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
        const u32 *line_size = NULL;
        int i, lim;

        lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

        /* try -line-size first, then fall back to -block-size */
        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[cache->type].line_size_props[i];
                line_size = of_get_property(cache->ofnode, propname, NULL);
                if (line_size)
                        break;
        }

        if (!line_size)
                return -ENODEV;

        *ret = *line_size;
        return 0;
}
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const u32 *nr_sets;

        propname = cache_type_info[cache->type].nr_sets_prop;

        nr_sets = of_get_property(cache->ofnode, propname, NULL);
        if (!nr_sets)
                return -ENODEV;

        *ret = *nr_sets;
        return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
        unsigned int line_size;
        unsigned int nr_sets;
        unsigned int size;

        if (cache_nr_sets(cache, &nr_sets))
                goto err;

        /* If the cache is fully associative, there is no need to
         * check the other properties. */
        if (nr_sets == 1) {
                *ret = 0;
                return 0;
        }

        if (cache_get_line_size(cache, &line_size))
                goto err;
        if (cache_size(cache, &size))
                goto err;

        if (!(nr_sets > 0 && size > 0 && line_size > 0))
                goto err;

        *ret = (size / nr_sets) / line_size;
        return 0;
err:
        return -ENODEV;
}
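
/* Worked example of the associativity arithmetic: a hypothetical
 * 32 KiB data cache with 128 sets and 64-byte lines yields
 * (32768 / 128) / 64 = 4, i.e. 4-way set-associative. */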
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
        struct cache *iter;

        if (cache->type == CACHE_TYPE_UNIFIED)
                return cache;

        /* for a split cache, find the sibling that links to us */
        list_for_each_entry(iter, &cache_list, list)
                if (iter->ofnode == cache->ofnode && iter->next_local == cache)
                        return iter;

        return cache;
}
/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
        struct cache *cache = NULL;
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list) {
                if (iter->ofnode != node)
                        continue;
                cache = cache_find_first_sibling(iter);
                break;
        }

        return cache;
}
static bool cache_node_is_unified(const struct device_node *np)
{
        return of_get_property(np, "cache-unified", NULL);
}
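
/* A device-tree cache node with the "cache-unified" property gets a
 * single cache object; otherwise it is a split (Harvard) cache and
 * gets a pair of objects, with the data cache chained to the
 * instruction cache via next_local. */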
static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node,
                                                            int level)
{
        struct cache *cache;

        pr_debug("creating L%d ucache for %s\n", level, node->full_name);

        cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

        return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node,
                                                          int level)
{
        struct cache *dcache, *icache;

        pr_debug("creating L%d dcache and icache for %s\n", level,
                 node->full_name);

        dcache = new_cache(CACHE_TYPE_DATA, level, node);
        icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

        if (!dcache || !icache)
                goto err;

        dcache->next_local = icache;

        return dcache;
err:
        release_cache(dcache);
        release_cache(icache);
        return NULL;
}
static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node,
                                                    int level)
{
        struct cache *cache;

        if (cache_node_is_unified(node))
                cache = cache_do_one_devnode_unified(node, level);
        else
                cache = cache_do_one_devnode_split(node, level);

        return cache;
}
static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node,
                                                           int level)
{
        struct cache *cache;

        cache = cache_lookup_by_node(node);

        WARN_ONCE(cache && cache->level != level,
                  "cache level mismatch on lookup (got %d, expected %d)\n",
                  cache->level, level);

        if (!cache)
                cache = cache_do_one_devnode(node, level);

        return cache;
}
static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
        /* append bigger to smaller's chain unless it is already there */
        while (smaller->next_local) {
                if (smaller->next_local == bigger)
                        return; /* already linked */
                smaller = smaller->next_local;
        }

        smaller->next_local = bigger;
}
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
        WARN_ON_ONCE(cache->level != 1);
        WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}
static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
        struct device_node *subcache_node;
        int level = cache->level;

        do_subsidiary_caches_debugcheck(cache);

        /* walk the l2-cache/next-level-cache links below the L1 */
        while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
                struct cache *subcache;

                level++;
                subcache = cache_lookup_or_instantiate(subcache_node, level);
                of_node_put(subcache_node);
                if (!subcache)
                        break;

                link_cache_lists(cache, subcache);
                cache = subcache;
        }
}
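
/* Hypothetical device-tree fragment showing the links this walk
 * follows (node and property names illustrative):
 *
 *      cpu@0 {
 *              d-cache-size = <0x8000>;
 *              i-cache-size = <0x8000>;
 *              l2-cache = <&L2_0>;
 *      };
 *      L2_0: l2-cache {
 *              compatible = "cache";
 *              cache-unified;
 *              d-cache-size = <0x80000>;
 *      };
 */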
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cpu_cache = NULL;

        pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                goto out;

        cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
        if (!cpu_cache)
                goto out;

        do_subsidiary_caches(cpu_cache);

        cache_cpu_set(cpu_cache, cpu_id);
out:
        of_node_put(cpu_node);

        return cpu_cache;
}
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct device *dev;
        struct kobject *kobj = NULL;

        dev = get_cpu_device(cpu_id);
        WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
        if (!dev)
                goto err;

        kobj = kobject_create_and_add("cache", &dev->kobj);
        if (!kobj)
                goto err;

        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto err;

        cache_dir->kobj = kobj;

        WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

        per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

        return cache_dir;
err:
        kobject_put(kobj);
        return NULL;
}
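
/* The directory created above appears as
 * /sys/devices/system/cpu/cpuN/cache, with one indexM subdirectory per
 * cache in the CPU's chain (created further below). */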
static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);

        pr_debug("freeing index directory for L%d %s cache\n",
                 index->cache->level, cache_type_string(index->cache));

        kfree(index);
}
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr,
                                char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);

        /* all attributes in this directory are read-only */
        return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(k);

        return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr,
                         char *buf)
{
        unsigned int size_kb;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_size_kb(cache, &size_kb))
                return -ENODEV;

        return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
        __ATTR(size, 0444, size_show, NULL);
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr,
                              char *buf)
{
        unsigned int line_size;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_get_line_size(cache, &line_size))
                return -ENODEV;

        return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
        __ATTR(coherency_line_size, 0444, line_size_show, NULL);
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr,
                            char *buf)
{
        unsigned int nr_sets;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_nr_sets(cache, &nr_sets))
                return -ENODEV;

        return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
        __ATTR(number_of_sets, 0444, nr_sets_show, NULL);
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr,
                                  char *buf)
{
        unsigned int associativity;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_associativity(cache, &associativity))
                return -ENODEV;

        return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
        __ATTR(ways_of_associativity, 0444, associativity_show, NULL);
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr,
                         char *buf)
{
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
        __ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr,
                          char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
        __ATTR(level, 0444, level_show, NULL);
static ssize_t shared_cpu_map_show(struct kobject *k,
                                   struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;
        int n;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        /* leave room for the trailing newline and NUL */
        n = cpumask_scnprintf(buf, PAGE_SIZE - 2, &cache->shared_cpu_map);
        buf[n++] = '\n';
        buf[n] = '\0';

        return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
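
/* The attribute names above (type, level, size, coherency_line_size,
 * number_of_sets, ways_of_associativity, shared_cpu_map) mirror the
 * x86 intel_cacheinfo sysfs interface, so userspace can use one
 * parser for both. */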
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache. */
static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_level_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        NULL,
};
/* Attributes which should be created only if the cache device node
 * has the required properties -- see
 * cacheinfo_create_index_opt_attrs(). */
static struct kobj_attribute *cache_index_opt_attrs[] = {
        &cache_size_attr,
        &cache_line_size_attr,
        &cache_nr_sets_attr,
        &cache_assoc_attr,
};
static const struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .release = cache_index_release,
        .sysfs_ops = &cache_index_ops,
        .default_attrs = cache_index_default_attrs,
};
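
/* Sketch of the resulting sysfs layout for one CPU (paths real,
 * values hypothetical):
 *
 *      /sys/devices/system/cpu/cpu0/cache/index0/type  -> Data
 *      /sys/devices/system/cpu/cpu0/cache/index0/level -> 1
 *      /sys/devices/system/cpu/cpu0/cache/index0/size  -> 32K
 *      /sys/devices/system/cpu/cpu0/cache/index1/type  -> Instruction
 *      /sys/devices/system/cpu/cpu0/cache/index2/type  -> Unified
 *      /sys/devices/system/cpu/cpu0/cache/index2/level -> 2
 */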
static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
        const char *cache_name;
        const char *cache_type;
        struct cache *cache;
        char *buf;
        int i;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return;

        cache = dir->cache;
        cache_name = cache->ofnode->full_name;
        cache_type = cache_type_string(cache);

        /* We don't want to create an attribute that can't provide a
         * meaningful value, so check the return value of each
         * optional attribute's ->show method before registering the
         * attribute. */
        for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
                struct kobj_attribute *attr;
                ssize_t rc;

                attr = cache_index_opt_attrs[i];

                rc = attr->show(&dir->kobj, attr, buf);
                if (rc <= 0) {
                        pr_debug("not creating %s attribute for %s(%s) (rc = %zd)\n",
                                 attr->attr.name, cache_name,
                                 cache_type, rc);
                        continue;
                }
                if (sysfs_create_file(&dir->kobj, &attr->attr))
                        pr_debug("could not create %s attribute for %s(%s)\n",
                                 attr->attr.name, cache_name, cache_type);
        }

        kfree(buf);
}
static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index,
                                                 struct cache_dir *cache_dir)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                goto err;

        index_dir->cache = cache;

        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc)
                goto err;

        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;

        cacheinfo_create_index_opt_attrs(index_dir);

        return;
err:
        kfree(index_dir);
}
static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id,
                                               struct cache *cache_list)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int index = 0;

        cache_dir = cacheinfo_create_cache_dir(cpu_id);
        if (!cache_dir)
                return;

        cache = cache_list;
        while (cache) {
                cacheinfo_create_index_dir(cache, index, cache_dir);
                index++;
                cache = cache->next_local;
        }
}

void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
        struct cache *cache;

        cache = cache_chain_instantiate(cpu_id);
        if (!cache)
                return;

        cacheinfo_sysfs_populate(cpu_id, cache);
}
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cache;

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                return NULL;

        cache = cache_lookup_by_node(cpu_node);
        of_node_put(cpu_node);

        return cache;
}
static void remove_index_dirs(struct cache_dir *cache_dir)
{
        struct cache_index_dir *index;

        index = cache_dir->index;

        while (index) {
                struct cache_index_dir *next;

                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
}
static void remove_cache_dir(struct cache_dir *cache_dir)
{
        remove_index_dirs(cache_dir);

        kobject_put(cache_dir->kobj);

        kfree(cache_dir);
}
static void cache_cpu_clear(struct cache *cache, int cpu)
{
        while (cache) {
                struct cache *next = cache->next_local;

                WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
                          "CPU %i not accounted in %s(%s)\n",
                          cpu, cache->ofnode->full_name,
                          cache_type_string(cache));

                cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

                /* Release the cache object if all the cpus using it
                 * are offline */
                if (cpumask_empty(&cache->shared_cpu_map))
                        release_cache(cache);

                cache = next;
        }
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct cache *cache;

        /* Prevent userspace from seeing inconsistent state -- remove
         * the sysfs hierarchy first */
        cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

        /* careful, sysfs population may have failed */
        if (cache_dir) {
                remove_cache_dir(cache_dir);
                per_cpu(cache_dir_pcpu, cpu_id) = NULL;
        }

        /* clear the CPU's bit in its cache chain, possibly freeing
         * cache objects */
        cache = cache_lookup_by_cpu(cpu_id);
        if (cache)
                cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */