/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node list;
	unsigned long offset;
	int pnode;
	int irq;
};
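
/* uv_irq_lock protects all updates to and lookups in uv_irq_root */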
static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
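
/*
 * No-op irq_chip callbacks for the operations UV does not need; only
 * EOI does real work, acking the local APIC.
 */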
static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}

static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}
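
/* The irq_chip installed for all interrupts sourced by UV hub MMRs */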
struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
			 uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists; update it in place */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int restrict)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (restrict == UV_AFFINITY_CPU)
		desc->status |= IRQ_NO_BALANCING;
	else
		desc->status |= IRQ_MOVE_PCNTXT;

	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
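
/*
 * Re-target an irq to a new CPU: rebuild the routing entry with the new
 * vector and destination, then rewrite the hub MMR recorded at setup time.
 */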
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	int mmr_pnode;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int restrict)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 restrict);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
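
/*
 * Minimal usage sketch (illustrative only, not part of this file): a driver
 * allocates a UV irq, installs a handler, and later tears everything down.
 * uv_example_handler, EXAMPLE_MMR_OFFSET, dev, cpu and mmr_blade are all
 * hypothetical names, not definitions from this file.
 *
 *	int irq = uv_setup_irq("uv-example", cpu, mmr_blade,
 *			       EXAMPLE_MMR_OFFSET, UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, uv_example_handler, 0, "uv-example", dev)) {
 *		uv_teardown_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, dev);
 *	uv_teardown_irq(irq);
 */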