Linux Kernel 3.7.1
cpu_rmap.c
/*
 * cpu_rmap.c: CPU affinity reverse-map support
 * Copyright 2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/cpu_rmap.h>
#ifdef CONFIG_GENERIC_HARDIRQS
#include <linux/interrupt.h>
#endif
#include <linux/export.h>

/*
 * These functions maintain a mapping from CPUs to some ordered set of
 * objects with CPU affinities.  This can be seen as a reverse-map of
 * CPU affinity.  However, we do not assume that the object affinities
 * cover all CPUs in the system.  For those CPUs not directly covered
 * by object affinities, we attempt to find a nearest object based on
 * CPU topology.
 */
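
/*
 * Usage sketch (illustrative only: struct example_rxq and example_setup_rmap
 * are hypothetical; the cpu_rmap_* calls are the API defined in this file
 * and in <linux/cpu_rmap.h>).  A consumer allocates one rmap slot per
 * object, records the index returned by cpu_rmap_add(), and reports each
 * object's CPU affinity with cpu_rmap_update().
 */
struct example_rxq {
        struct cpumask affinity;        /* CPUs this queue prefers */
        u16 rmap_index;                 /* slot returned by cpu_rmap_add() */
};

static struct cpu_rmap *example_setup_rmap(struct example_rxq *rxq,
                                           unsigned int nqueues)
{
        struct cpu_rmap *rmap;
        unsigned int i;

        /* One slot per queue; GFP_KERNEL since we are in process context */
        rmap = alloc_cpu_rmap(nqueues, GFP_KERNEL);
        if (!rmap)
                return NULL;

        for (i = 0; i < nqueues; i++) {
                /* Register the queue, then seed the map with its affinity */
                rxq[i].rmap_index = cpu_rmap_add(rmap, &rxq[i]);
                if (cpu_rmap_update(rmap, rxq[i].rmap_index,
                                    &rxq[i].affinity)) {
                        kfree(rmap);    /* a plain rmap is one kzalloc'd block */
                        return NULL;
                }
        }

        /*
         * On any CPU, even one outside every affinity mask, the nearest
         * queue can now be found by index or by object pointer:
         *
         *      idx = cpu_rmap_lookup_index(rmap, raw_smp_processor_id());
         *      q = cpu_rmap_lookup_obj(rmap, raw_smp_processor_id());
         */
        return rmap;
}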

/**
 * alloc_cpu_rmap - allocate CPU affinity reverse-map
 * @size: Number of objects to be mapped
 * @flags: Allocation flags e.g. %GFP_KERNEL
 */
struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
{
        struct cpu_rmap *rmap;
        unsigned int cpu;
        size_t obj_offset;

        /* This is a silly number of objects, and we use u16 indices. */
        if (size > 0xffff)
                return NULL;

        /* Offset of object pointer array from base structure */
        obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
                           sizeof(void *));

        rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
        if (!rmap)
                return NULL;

        rmap->obj = (void **)((char *)rmap + obj_offset);

        /* Initially assign CPUs to objects on a rota, since we have
         * no idea where the objects are.  Use infinite distance, so
         * any object with known distance is preferable.  Include the
         * CPUs that are not present/online, since we definitely want
         * any newly-hotplugged CPUs to have some object assigned.
         */
        for_each_possible_cpu(cpu) {
                rmap->near[cpu].index = cpu % size;
                rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
        }

        rmap->size = size;
        return rmap;
}
EXPORT_SYMBOL(alloc_cpu_rmap);

/* Reevaluate nearest object for given CPU, comparing with the given
 * neighbours at the given distance.
 */
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
                                const struct cpumask *mask, u16 dist)
{
        int neigh;

        for_each_cpu(neigh, mask) {
                if (rmap->near[cpu].dist > dist &&
                    rmap->near[neigh].dist <= dist) {
                        rmap->near[cpu].index = rmap->near[neigh].index;
                        rmap->near[cpu].dist = dist;
                        return true;
                }
        }
        return false;
}

#ifdef DEBUG
static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
        unsigned index;
        unsigned int cpu;

        pr_info("cpu_rmap %p, %s:\n", rmap, prefix);

        for_each_possible_cpu(cpu) {
                index = rmap->near[cpu].index;
                pr_info("cpu %d -> obj %u (distance %u)\n",
                        cpu, index, rmap->near[cpu].dist);
        }
}
#else
static inline void
debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
}
#endif

/**
 * cpu_rmap_add - add object to a rmap
 * @rmap: CPU rmap allocated with alloc_cpu_rmap()
 * @obj: Object to add to rmap
 *
 * Return index of object.
 */
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
        u16 index;

        BUG_ON(rmap->used >= rmap->size);
        index = rmap->used++;
        rmap->obj[index] = obj;
        return index;
}
EXPORT_SYMBOL(cpu_rmap_add);

/**
 * cpu_rmap_update - update CPU rmap following a change of object affinity
 * @rmap: CPU rmap to update
 * @index: Index of object whose affinity changed
 * @affinity: New CPU affinity of object
 */
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
                    const struct cpumask *affinity)
{
        cpumask_var_t update_mask;
        unsigned int cpu;

        if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
                return -ENOMEM;

        /* Invalidate distance for all CPUs for which this used to be
         * the nearest object.  Mark those CPUs for update.
         */
        for_each_online_cpu(cpu) {
                if (rmap->near[cpu].index == index) {
                        rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
                        cpumask_set_cpu(cpu, update_mask);
                }
        }

        debug_print_rmap(rmap, "after invalidating old distances");

        /* Set distance to 0 for all CPUs in the new affinity mask.
         * Mark all CPUs within their NUMA nodes for update.
         */
        for_each_cpu(cpu, affinity) {
                rmap->near[cpu].index = index;
                rmap->near[cpu].dist = 0;
                cpumask_or(update_mask, update_mask,
                           cpumask_of_node(cpu_to_node(cpu)));
        }

        debug_print_rmap(rmap, "after updating neighbours");

        /* Update distances based on topology: prefer an object already
         * mapped to an SMT sibling (distance 1), then to a CPU in the
         * same package (distance 2), then to one on the same NUMA node
         * (distance 3).
         */
        for_each_cpu(cpu, update_mask) {
                if (cpu_rmap_copy_neigh(rmap, cpu,
                                        topology_thread_cpumask(cpu), 1))
                        continue;
                if (cpu_rmap_copy_neigh(rmap, cpu,
                                        topology_core_cpumask(cpu), 2))
                        continue;
                if (cpu_rmap_copy_neigh(rmap, cpu,
                                        cpumask_of_node(cpu_to_node(cpu)), 3))
                        continue;
                /* We could continue into NUMA node distances, but for now
                 * we give up.
                 */
        }

        debug_print_rmap(rmap, "after copying neighbours");

        free_cpumask_var(update_mask);
        return 0;
}
EXPORT_SYMBOL(cpu_rmap_update);

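/*
 * Worked example (hypothetical topology: CPUs 0-1 are SMT siblings on NUMA
 * node 0, CPUs 2-3 are SMT siblings on node 1; two objects in the rmap).
 * After cpu_rmap_update(rmap, 0, <mask of CPU 0>) and
 * cpu_rmap_update(rmap, 1, <mask of CPU 2>), debug_print_rmap() would show:
 *
 *      cpu 0 -> obj 0 (distance 0)
 *      cpu 1 -> obj 0 (distance 1)
 *      cpu 2 -> obj 1 (distance 0)
 *      cpu 3 -> obj 1 (distance 1)
 *
 * CPUs 1 and 3 appear in neither affinity mask, but resolve to their SMT
 * sibling's object at distance 1 via cpu_rmap_copy_neigh().
 */
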
#ifdef CONFIG_GENERIC_HARDIRQS

/* Glue between IRQ affinity notifiers and CPU rmaps */

struct irq_glue {
        struct irq_affinity_notify notify;
        struct cpu_rmap *rmap;
        u16 index;
};

/**
 * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
 * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
 *
 * Must be called in process context, before freeing the IRQs, and
 * without holding any locks required by global workqueue items.
 */
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
        struct irq_glue *glue;
        u16 index;

        if (!rmap)
                return;

        for (index = 0; index < rmap->used; index++) {
                glue = rmap->obj[index];
                irq_set_affinity_notifier(glue->notify.irq, NULL);
        }
        irq_run_affinity_notifiers();

        kfree(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);

/**
 * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
 * @notify: struct irq_affinity_notify passed by irq/manage.c
 * @mask: cpu mask for new SMP affinity
 *
 * This is executed in workqueue context.
 */
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
        struct irq_glue *glue =
                container_of(notify, struct irq_glue, notify);
        int rc;

        rc = cpu_rmap_update(glue->rmap, glue->index, mask);
        if (rc)
                pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
}

/**
 * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
 * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
 */
static void irq_cpu_rmap_release(struct kref *ref)
{
        struct irq_glue *glue =
                container_of(ref, struct irq_glue, notify.kref);
        kfree(glue);
}

/**
 * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
 * @rmap: The reverse-map
 * @irq: The IRQ number
 *
 * This adds an IRQ affinity notifier that will update the reverse-map
 * automatically.
 *
 * Must be called in process context, after the IRQ is allocated but
 * before it is bound with request_irq().
 */
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
        struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        int rc;

        if (!glue)
                return -ENOMEM;
        glue->notify.notify = irq_cpu_rmap_notify;
        glue->notify.release = irq_cpu_rmap_release;
        glue->rmap = rmap;
        glue->index = cpu_rmap_add(rmap, glue);
        rc = irq_set_affinity_notifier(irq, &glue->notify);
        if (rc)
                kfree(glue);
        return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
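/*
 * Driver-side sketch (illustrative only: example_setup_irq_rmap and the
 * irqs[] array are hypothetical; alloc_irq_cpu_rmap() is the GFP_KERNEL
 * wrapper declared in <linux/cpu_rmap.h>).  A driver with one IRQ per
 * receive queue builds the reverse-map once and registers each vector so
 * that later affinity changes keep the map current automatically.
 */
static struct cpu_rmap *example_setup_irq_rmap(const int *irqs,
                                               unsigned int nvec)
{
        struct cpu_rmap *rmap;
        unsigned int i;

        rmap = alloc_irq_cpu_rmap(nvec);
        if (!rmap)
                return NULL;

        for (i = 0; i < nvec; i++) {
                /* After the vector is allocated, before request_irq() */
                if (irq_cpu_rmap_add(rmap, irqs[i])) {
                        free_irq_cpu_rmap(rmap);
                        return NULL;
                }
        }
        return rmap;
}

/*
 * Teardown mirrors setup: call free_irq_cpu_rmap() in process context,
 * before the IRQs themselves are freed.
 */
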
#endif /* CONFIG_GENERIC_HARDIRQS */