Linux Kernel 3.7.1
uv_irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
        struct rb_node          list;
        unsigned long           offset;
        int                     pnode;
        int                     irq;
};
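
/*
 * Nodes of this type form an rb tree, keyed by irq number, that maps each
 * irq to the MMR offset and pnode recorded for it; the root (uv_irq_root)
 * and the lock serializing access (uv_irq_lock) are defined below.
 */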

static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root           uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
        ack_APIC_irq();
}

static struct irq_chip uv_irq_chip = {
        .name                   = "UV-CORE",
        .irq_mask               = uv_noop,
        .irq_unmask             = uv_noop,
        .irq_eoi                = uv_ack_apic,
        .irq_set_affinity       = uv_set_irq_affinity,
};

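/*
 * Note that .irq_mask and .irq_unmask above are deliberately no-ops: the
 * only per-irq masking UV does is the mask bit written through the hub
 * MMR (see arch_disable_uv_irq() below), and EOI is a plain local APIC
 * ack.
 */
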
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
        struct rb_node **link = &uv_irq_root.rb_node;
        struct rb_node *parent = NULL;
        struct uv_irq_2_mmr_pnode *n;
        struct uv_irq_2_mmr_pnode *e;
        unsigned long irqflags;

        n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
                         uv_blade_to_memory_nid(blade));
        if (!n)
                return -ENOMEM;

        n->irq = irq;
        n->offset = offset;
        n->pnode = uv_blade_to_pnode(blade);
        spin_lock_irqsave(&uv_irq_lock, irqflags);
        /* Find the right place in the rbtree: */
        while (*link) {
                parent = *link;
                e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

                if (unlikely(irq == e->irq)) {
                        /* irq entry exists */
                        e->pnode = uv_blade_to_pnode(blade);
                        e->offset = offset;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        kfree(n);
                        return 0;
                }

                if (irq < e->irq)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /* Insert the node into the rbtree. */
        rb_link_node(&n->list, parent, link);
        rb_insert_color(&n->list, &uv_irq_root);

        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return 0;
}

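/*
 * Note that calling uv_set_irq_2_mmr_info() again for an irq that is
 * already in the tree updates the existing entry's offset and pnode in
 * place (the unlikely(irq == e->irq) branch above) instead of inserting
 * a duplicate node.
 */
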
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

                if (e->irq == irq) {
                        *offset = e->offset;
                        *pnode = e->pnode;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        return 0;
                }

                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return -1;
}

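/*
 * A minimal caller sketch (hypothetical, not part of this file), showing
 * how the recorded routing information can be recovered from an irq:
 *
 *      unsigned long offset;
 *      int pnode;
 *
 *      if (uv_irq_2_mmr_info(irq, &offset, &pnode) == 0)
 *              pr_info("irq %d: pnode %d, MMR offset 0x%lx\n",
 *                      irq, pnode, offset);
 */
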
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                   unsigned long mmr_offset, int limit)
{
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode, err;
        unsigned int dest;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                     sizeof(unsigned long));

        err = assign_irq_vector(irq, cfg, eligible_cpu);
        if (err != 0)
                return err;

        err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
        if (err != 0)
                return err;

        if (limit == UV_AFFINITY_CPU)
                irq_set_status_flags(irq, IRQ_NO_BALANCING);
        else
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

        irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
                                      irq_name);

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = dest;

        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return irq;
}

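/*
 * In both arch_enable_uv_irq() above and arch_disable_uv_irq() below, the
 * 64-bit value written to the hub MMR is composed by aliasing mmr_value
 * as a struct uv_IO_APIC_route_entry (a bitfield layout from
 * <asm/uv/uv_irq.h>); the BUILD_BUG_ON() verifies at compile time that
 * the structure really occupies exactly one unsigned long.
 */
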
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                     sizeof(unsigned long));

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
                    bool force)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned int dest;
        unsigned long mmr_value, mmr_offset;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode;

        if (__ioapic_set_affinity(data, mask, &dest))
                return -1;

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = dest;

        /* Get previously stored MMR and pnode of hub sourcing interrupts */
        if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
                return -1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return IRQ_SET_MASK_OK_NOCOPY;
}

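/*
 * Returning IRQ_SET_MASK_OK_NOCOPY above tells the generic irq core that
 * irq_data->affinity has already been updated (by __ioapic_set_affinity()),
 * so the core must not copy the mask again.
 */
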
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int limit)
{
        int irq, ret;

        irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

        if (irq <= 0)
                return -EBUSY;

        ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
                                 limit);
        if (ret == irq)
                uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
        else
                destroy_irq(irq);

        return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree and used to disable the MMR before the irq is destroyed.
 */
void uv_teardown_irq(unsigned int irq)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
                if (e->irq == irq) {
                        arch_disable_uv_irq(e->pnode, e->offset);
                        rb_erase(n, &uv_irq_root);
                        kfree(e);
                        break;
                }
                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        destroy_irq(irq);
}
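
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file). "my-uv-dev", my_handler, cpu, blade and mmr_offset are assumed
 * placeholders; UV_AFFINITY_CPU is from <asm/uv/uv_irq.h>:
 *
 *      int irq = uv_setup_irq("my-uv-dev", cpu, blade, mmr_offset,
 *                             UV_AFFINITY_CPU);
 *      if (irq < 0)
 *              return irq;
 *      if (request_irq(irq, my_handler, 0, "my-uv-dev", NULL)) {
 *              uv_teardown_irq(irq);
 *              return -EBUSY;
 *      }
 *      ...
 *      free_irq(irq, NULL);
 *      uv_teardown_irq(irq);
 */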