#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
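
/* Niagara-optimized cpuinfo tree traversal: strands in a Niagara core
 * share instruction pipeline(s), so work is handed to strands in
 * different cores (and NUMA nodes) before reusing strands in the same
 * core.  Each entry is the rover-increment policy for one tree level.
 */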
static const int niagara_iterate_method[] = {
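
/* Generic traversal order for other sun4v chips; this is the fallback
 * table selected in iterate_cpu() below.
 */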
static const int generic_iterate_method[] = {
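
/* Map @cpu to its identifier at the given level of the cpuinfo
 * hierarchy (root, NUMA node, core, or proc/pipeline); a negative
 * value indicates an invalid level.
 */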
static int cpuinfo_id(int cpu, int level)
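
/* Work out how many tree nodes each level needs for the online CPUs and
 * record each level's start/end indices in @tree_level; the total node
 * count returned is used to size the allocation in build_cpuinfo_tree().
 */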
static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
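
/* Allocate and populate a cpuinfo_tree describing the online CPUs: one
 * node per distinct id at every level, linked to its parent and children
 * and carrying a rover used for round-robin iteration.
 */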
static struct cpuinfo_tree *build_cpuinfo_tree(void)
	struct cpuinfo_tree *new_tree;

	n = enumerate_cpuinfo_nodes(tmp_level);
	new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
			   (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
	memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
	prev_cpu = cpu = cpumask_first(cpu_online_mask);
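
	/* Seed every level of the tree with the first online CPU. */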
		node = &new_tree->nodes[n];

		id = cpuinfo_id(cpu, level);
		node->parent_index = (level > CPUINFO_LVL_ROOT)
		    ? new_tree->level[level - 1].start_index : -1;
		node->child_start = node->child_end = node->rover =
		    (level == CPUINFO_LVL_PROC)
		    ? cpu : new_tree->level[level + 1].start_index;
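
	/* Walk the remaining online CPUs; whenever a CPU's id changes at a
	 * given level, close out the current node at that level and start
	 * the next one.
	 */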
	while (++cpu <= last_cpu) {
			id = cpuinfo_id(cpu, level);
			if ((id != prev_id[level]) || (cpu == last_cpu)) {
				node->parent_index = level_rover[level - 1];
				if (level == CPUINFO_LVL_PROC)
					node->child_end = (cpu == last_cpu) ? cpu : prev_cpu;
				else
					node->child_end = level_rover[level + 1] - 1;
				n = ++level_rover[level];
				if (n <= new_tree->level[level].end_index) {
					node = &new_tree->nodes[n];
					node->child_start = node->child_end = node->rover =
					    (level == CPUINFO_LVL_PROC) ? cpu : level_rover[level + 1];
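
/* Advance the rover of the node at @node_index; when a rover wraps back to
 * its first child, propagate the increment to parent rovers as dictated by
 * @rover_inc_table, stopping at the level of @root_index.
 */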
static void increment_rover(struct cpuinfo_tree *t, int node_index,
			    int root_index, const int *rover_inc_table)
	struct cpuinfo_node *node = &t->nodes[node_index];
	int top_level, level;

	top_level = t->nodes[root_index].level;
	for (level = node->level; level >= top_level; level--) {
		if ((level == top_level) ||
		    !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
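
/* Pick the next CPU for the subtree rooted at @root_index by following
 * each level's rover down to a leaf, bumping rovers along the way
 * according to the chip-specific policy table.
 */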
static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
	const int *rover_inc_table;
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
	case SUN4V_CHIP_NIAGARA4:
	case SUN4V_CHIP_NIAGARA5:
		rover_inc_table = niagara_iterate_method;
		break;
	default:
		rover_inc_table = generic_iterate_method;
	}
	for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
	     level++) {
		increment_rover(t, index, root_index, rover_inc_table);
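
/* Rebuild the cpuinfo tree and the cpu_distribution_map; callers are
 * expected to hold cpu_map_lock (see the locked wrappers below).
 */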
static void _cpu_map_rebuild(void)
	cpuinfo_tree = build_cpuinfo_tree();
	for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
		cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
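
/* Fallback mapping used when no cpuinfo tree is available: pick roughly
 * index modulo the number of online CPUs, skipping offline ones.
 */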
static int simple_map_to_cpu(unsigned int index)
	int i, end, cpu_rover;
			if (cpu_rover >= end)
	return cpumask_first(cpu_online_mask);
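
/* Translate an arbitrary index into an online CPU via the distribution
 * map, rebuilding the tree first if it is missing or (with CPU hotplug)
 * stale; falls back to simple_map_to_cpu() when no tree can be built.
 */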
static int _map_to_cpu(unsigned int index)
			return simple_map_to_cpu(index);
	root_node = &cpuinfo_tree->nodes[0];
#ifdef CONFIG_HOTPLUG_CPU
			return simple_map_to_cpu(index);
#endif
	return cpu_distribution_map[index % root_node->num_cpus];
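
/* map_to_cpu(), the entry point exported to the rest of the kernel, takes
 * cpu_map_lock around the lookup; under CONFIG_HOTPLUG_CPU the lookup is
 * retried until an online CPU comes back.
 */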
	mapped_cpu = _map_to_cpu(index);
#ifdef CONFIG_HOTPLUG_CPU
		mapped_cpu = _map_to_cpu(index);
#endif
	spin_unlock_irqrestore(&cpu_map_lock, flag);
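
/* cpu_map_rebuild(), the public rebuild path, drops the lock the same way
 * after calling _cpu_map_rebuild().
 */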
	spin_unlock_irqrestore(&cpu_map_lock, flag);