/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw.h"
#include "hw/spapr.h"
#include "hw/xics.h"

#include <pthread.h>

/*
 * ICP: Presentation layer
 */

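/*
 * Each server (CPU thread) has one presentation controller.  The XIRR
 * packs the CPPR (current processor priority) into its top byte and the
 * XISR (source number of the interrupt being presented) into its low 24
 * bits.  Numerically lower priorities are more favoured; 0xff means
 * least favoured (effectively masked).  The MFRR holds the priority at
 * which an inter-processor interrupt (XICS_IPI) should be presented.
 */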
struct icp_server_state {
    uint32_t xirr;
    uint8_t pending_priority;
    uint8_t mfrr;
    qemu_irq output;
};

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

struct ics_state;

struct icp_state {
    long nr_servers;
    struct icp_server_state *ss;
    struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);

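/*
 * Decide whether the MFRR IPI should be presented to the server: if
 * nothing is pending, or the pending interrupt is less favoured than
 * the MFRR, any pending source is pushed back to the ICS and the IPI
 * is latched into the XISR instead.
 */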
static void icp_check_ipi(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

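/*
 * A CPPR update either makes the server more restrictive (numerically
 * lower CPPR), in which case a pending interrupt that is no longer
 * sufficiently favoured is rejected back to the ICS, or less
 * restrictive, in which case previously rejected sources get a chance
 * to be resent.
 */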
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
    struct icp_server_state *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
{
    struct icp_server_state *ss = icp->ss + nr;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, nr);
    }
}

static uint32_t icp_accept(struct icp_server_state *ss)
{
    uint32_t xirr;

    qemu_irq_lower(ss->output);
    xirr = ss->xirr;
    ss->xirr = ss->pending_priority << 24;
    return xirr;
}

static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
    struct icp_server_state *ss = icp->ss + server;

    ics_eoi(icp->ics, xirr & XISR_MASK);
    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

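/*
 * Present an interrupt from the ICS.  If the server is running at an
 * equally or more favoured priority, or already has an equally or more
 * favoured interrupt pending, the new source is rejected back to the
 * ICS; otherwise any currently pending source is rejected and the new
 * one is latched into the XISR and signalled to the CPU.
 */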
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
    struct icp_server_state *ss = icp->ss + server;

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        qemu_irq_raise(ss->output);
    }
}

/*
 * ICS: Source layer
 */

struct ics_irq_state {
    int server;
    uint8_t priority;
    uint8_t saved_priority;
    /* int pending:1; */
    /* int presented:1; */
    int rejected:1;
    int masked_pending:1;
};

struct ics_state {
    int nr_irqs;
    int offset;
    qemu_irq *qirqs;
    struct ics_irq_state *irqs;
    struct icp_state *icp;
};

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

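/*
 * qemu_irq handler for edge (MSI-like) sources: an assertion on a
 * masked source (priority 0xff) is only recorded as pending, an
 * assertion on an unmasked source is delivered to its server's ICP
 * straight away.
 */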
static void ics_set_irq_msi(void *opaque, int nr, int val)
{
    struct ics_state *ics = (struct ics_state *)opaque;
    struct ics_irq_state *irq = ics->irqs + nr;

    if (val) {
        if (irq->priority == 0xff) {
            irq->masked_pending = 1;
            /* masked pending */ ;
        } else {
            icp_irq(ics->icp, irq->server, nr + ics->offset, irq->priority);
        }
    }
}

static void ics_reject_msi(struct ics_state *ics, int nr)
{
    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

    irq->rejected = 1;
}

static void ics_resend_msi(struct ics_state *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        struct ics_irq_state *irq = ics->irqs + i;

        /* FIXME: filter by server#? */
        if (irq->rejected) {
            irq->rejected = 0;
            if (irq->priority != 0xff) {
                icp_irq(ics->icp, irq->server, i + ics->offset, irq->priority);
            }
        }
    }
}

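/*
 * Update a source's server/priority pair (its "XIVE", in the naming of
 * the ibm,set-xive RTAS call).  Unmasking a source that has a pending
 * assertion delivers it immediately.
 */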
static void ics_write_xive_msi(struct ics_state *ics, int nr, int server,
                               uint8_t priority)
{
    struct ics_irq_state *irq = ics->irqs + nr;

    irq->server = server;
    irq->priority = priority;

    if (!irq->masked_pending || (priority == 0xff)) {
        return;
    }

    irq->masked_pending = 0;
    icp_irq(ics->icp, server, nr + ics->offset, priority);
}

static void ics_reject(struct ics_state *ics, int nr)
{
    ics_reject_msi(ics, nr);
}

static void ics_resend(struct ics_state *ics)
{
    ics_resend_msi(ics);
}

static void ics_eoi(struct ics_state *ics, int nr)
{
}

/*
 * Exported functions
 */

qemu_irq xics_find_qirq(struct icp_state *icp, int irq)
{
    if ((irq < icp->ics->offset)
        || (irq >= (icp->ics->offset + icp->ics->nr_irqs))) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

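/*
 * Hypercall entry points, registered below for H_CPPR, H_IPI, H_XIRR
 * and H_EOI: under PAPR these are the guest's interface to the
 * presentation controller.
 */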
static target_ulong h_cppr(CPUState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, env->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(CPUState *env, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(CPUState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    uint32_t xirr = icp_accept(spapr->icp->ss + env->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_eoi(CPUState *env, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, env->cpu_index, xirr);
    return H_SUCCESS;
}

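/*
 * RTAS entry points, registered below for ibm,set-xive, ibm,get-xive,
 * ibm,int-off and ibm,int-on: these configure the source controller.
 * A status of -3 is returned for invalid arguments.
 */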
static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive_msi(ics, nr - ics->offset, server, priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* This is a NOP for now, since the described PAPR semantics don't
     * seem to gel with what Linux does */
#if 0
    struct ics_irq_state *irq = ics->irqs + (nr - ics->offset);

    irq->saved_priority = irq->priority;
    ics_write_xive_msi(ics, nr - ics->offset, irq->server, 0xff);
#endif

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* This is a NOP for now, since the described PAPR semantics don't
     * seem to gel with what Linux does */
#if 0
    struct ics_irq_state *irq = ics->irqs + (nr - ics->offset);

    ics_write_xive_msi(ics, nr - ics->offset,
                       irq->server, irq->saved_priority);
#endif

    rtas_st(rets, 0, 0); /* Success */
}

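/*
 * Build the XICS: one ICP server state per CPU (indexed by cpu_index)
 * wired to that CPU's external interrupt input, plus a single ICS whose
 * sources start at global interrupt number 16 (the "offset") and are
 * initially masked (priority 0xff).
 */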
struct icp_state *xics_system_init(int nr_irqs)
{
    CPUState *env;
    int max_server_num;
    int i;
    struct icp_state *icp;
    struct ics_state *ics;

    max_server_num = -1;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->cpu_index > max_server_num) {
            max_server_num = env->cpu_index;
        }
    }

    icp = qemu_mallocz(sizeof(*icp));
    icp->nr_servers = max_server_num + 1;
    icp->ss = qemu_mallocz(icp->nr_servers * sizeof(struct icp_server_state));

    for (i = 0; i < icp->nr_servers; i++) {
        icp->ss[i].mfrr = 0xff;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        struct icp_server_state *ss = &icp->ss[env->cpu_index];

        switch (PPC_INPUT(env)) {
        case PPC_FLAGS_INPUT_POWER7:
            ss->output = env->irq_inputs[POWER7_INPUT_INT];
            break;

        case PPC_FLAGS_INPUT_970:
            ss->output = env->irq_inputs[PPC970_INPUT_INT];
            break;

        default:
            hw_error("XICS interrupt model does not support this CPU bus "
                     "model\n");
            exit(1);
        }
    }

    ics = qemu_mallocz(sizeof(*ics));
    ics->nr_irqs = nr_irqs;
    ics->offset = 16;
    ics->irqs = qemu_mallocz(nr_irqs * sizeof(struct ics_irq_state));

    icp->ics = ics;
    ics->icp = icp;

    for (i = 0; i < nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }

    ics->qirqs = qemu_allocate_irqs(ics_set_irq_msi, ics, nr_irqs);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_EOI, h_eoi);

    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    return icp;
}