nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/arch/ia64/kernel/msi_ia64.c
/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
/*
 * Shifts for APIC-based data
 */

#define MSI_DATA_VECTOR_SHIFT		0
#define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK		0xffffff00

#define MSI_DATA_DELIVERY_SHIFT		8
#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
#define MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_SHIFT)

#define MSI_DATA_LEVEL_SHIFT		14
#define MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)

#define MSI_DATA_TRIGGER_SHIFT		15
#define MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
/*
 * Shift/mask fields for APIC-based bus address
 */

#define MSI_TARGET_CPU_SHIFT		4
#define MSI_ADDR_HEADER			0xfee00000

#define MSI_ADDR_DESTID_MASK		0xfff0000f
#define MSI_ADDR_DESTID_CPU(cpu)	((cpu) << MSI_TARGET_CPU_SHIFT)

#define MSI_ADDR_DESTMODE_SHIFT		2
#define MSI_ADDR_DESTMODE_PHYS		(0 << MSI_ADDR_DESTMODE_SHIFT)
#define MSI_ADDR_DESTMODE_LOGIC		(1 << MSI_ADDR_DESTMODE_SHIFT)

#define MSI_ADDR_REDIRECTION_SHIFT	3
#define MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)

static struct irq_chip	ia64_msi_chip;

#ifdef CONFIG_SMP
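/*
 * Retarget a PCI MSI interrupt: rewrite the destination ID in the MSI
 * address and the vector in the MSI data so the device signals the first
 * online CPU of the requested mask, then record the new affinity.
 */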
static void ia64_set_msi_irq_affinity(unsigned int irq,
				      const cpumask_t *cpu_mask)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(*cpu_mask);

	if (!cpu_online(cpu))
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
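/*
 * Allocate an irq (and thus a vector), compose an MSI message that targets
 * one online CPU of the irq's vector domain, write it to the device and
 * install the edge-triggered PCI-MSI irq_chip.
 */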
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;
	cpumask_t mask;

	irq = create_irq();
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);
	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest_phys_id = cpu_physical_id(first_cpu(mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
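/*
 * Ack an MSI: finish any pending vector migration for this irq, then issue
 * an EOI on the local processor.
 */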
static void ia64_ack_msi_irq(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ia64_eoi();
}
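/* Resend the vector assigned to this irq to simulate a lost interrupt. */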
static int ia64_msi_retrigger_irq(unsigned int irq)
{
	unsigned int vector = irq_to_vector(irq);
	ia64_resend_irq(vector);

	return 1;
}
/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name		= "PCI-MSI",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.retrigger	= ia64_msi_retrigger_irq,
};
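/*
 * Entry points used by the generic MSI layer: let the platform (machine
 * vector) override setup/teardown if it provides its own hooks, otherwise
 * fall back to the generic IA-64 implementation above.
 */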
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}
void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
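/*
 * Retarget a DMAR (interrupt remapping hardware) MSI to the first online
 * CPU of the requested mask by rewriting its vector and destination ID.
 */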
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ia64_msi_retrigger_irq,
};
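/*
 * Compose an MSI message (address/data pair) that delivers the irq's
 * vector, edge-triggered and in fixed delivery mode, to one online CPU
 * of the irq's vector domain.
 */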
static int
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;
	cpumask_t mask;

	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
	dest = cpu_physical_id(first_cpu(mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DESTMODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DESTID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
	return 0;
}
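/*
 * Program the MSI used by a DMAR unit: compose the message, write it to
 * the remapping hardware and install the edge-triggered DMAR irq_chip.
 */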
int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif /* CONFIG_DMAR */