[qemu/aliguori-queue.git] / hw / ppc4xx_devs.c
blob 7921ebfb4a00838a2720de736c8226684c05e33d
/*
 * QEMU PowerPC 4xx embedded processors shared devices emulation
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "ppc.h"
#include "ppc4xx.h"
#include "sysemu.h"
#include "qemu-log.h"

//#define DEBUG_MMIO
//#define DEBUG_UNASSIGNED
#define DEBUG_UIC

#ifdef DEBUG_UIC
#  define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#else
#  define LOG_UIC(...) do { } while (0)
#endif
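
/* Note on the macro above: even with DEBUG_UIC defined, LOG_UIC() still goes
 * through qemu_log_mask(CPU_LOG_INT, ...), so its output only appears when
 * interrupt logging is enabled at run time (for example with "-d int"). */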
/*****************************************************************************/
/* Generic PowerPC 4xx processor instantiation */
CPUState *ppc4xx_init (const char *cpu_model,
                       clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
                       uint32_t sysclk)
{
    CPUState *env;

    /* init CPUs */
    env = cpu_init(cpu_model);
    if (!env) {
        fprintf(stderr, "Unable to find PowerPC %s CPU definition\n",
                cpu_model);
        exit(1);
    }
    cpu_clk->cb = NULL; /* We don't care about CPU clock frequency changes */
    cpu_clk->opaque = env;
    /* Set time-base frequency to sysclk */
    tb_clk->cb = ppc_emb_timers_init(env, sysclk);
    tb_clk->opaque = env;
    ppc_dcr_init(env, NULL, NULL);
    /* Register qemu callbacks */
    qemu_register_reset((QEMUResetHandler*)&cpu_reset, env);

    return env;
}
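
/* Illustrative usage (a sketch, not copied from a real board file): a 4xx
 * board model would typically create its CPU with something like
 *
 *     clk_setup_t clk_setup[2];
 *     CPUState *env = ppc4xx_init("405ep", &clk_setup[0], &clk_setup[1],
 *                                 33333333);
 *
 * where "405ep" and the 33.33 MHz sysclk are example values; the actual CPU
 * model name and clock depend on the SoC being emulated. */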
/*****************************************************************************/
/* "Universal" Interrupt controller */
enum {
    DCR_UICSR  = 0x000,
    DCR_UICSRS = 0x001,
    DCR_UICER  = 0x002,
    DCR_UICCR  = 0x003,
    DCR_UICPR  = 0x004,
    DCR_UICTR  = 0x005,
    DCR_UICMSR = 0x006,
    DCR_UICVR  = 0x007,
    DCR_UICVCR = 0x008,
    DCR_UICMAX = 0x009,
};

#define UIC_MAX_IRQ 32
typedef struct ppcuic_t ppcuic_t;
struct ppcuic_t {
    uint32_t dcr_base;
    int use_vectors;
    uint32_t level;  /* Remembers the state of level-triggered interrupts. */
    uint32_t uicsr;  /* Status register */
    uint32_t uicer;  /* Enable register */
    uint32_t uiccr;  /* Critical register */
    uint32_t uicpr;  /* Polarity register */
    uint32_t uictr;  /* Triggering register */
    uint32_t uicvcr; /* Vector configuration register */
    uint32_t uicvr;
    qemu_irq *irqs;
};
static void ppcuic_trigger_irq (ppcuic_t *uic)
{
    uint32_t ir, cr;
    int start, end, inc, i;

    /* Trigger interrupt if any is pending */
    ir = uic->uicsr & uic->uicer & (~uic->uiccr);
    cr = uic->uicsr & uic->uicer & uic->uiccr;
    LOG_UIC("%s: uicsr %08" PRIx32 " uicer %08" PRIx32
            " uiccr %08" PRIx32 "\n"
            " %08" PRIx32 " ir %08" PRIx32 " cr %08" PRIx32 "\n",
            __func__, uic->uicsr, uic->uicer, uic->uiccr,
            uic->uicsr & uic->uicer, ir, cr);
    if (ir != 0x00000000) {
        LOG_UIC("Raise UIC interrupt\n");
        qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_INT]);
    } else {
        LOG_UIC("Lower UIC interrupt\n");
        qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_INT]);
    }
    /* Trigger critical interrupt if any is pending and update vector */
    if (cr != 0x00000000) {
        qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_CINT]);
        if (uic->use_vectors) {
            /* Compute critical IRQ vector */
            if (uic->uicvcr & 1) {
                start = 31;
                end = 0;
                inc = -1;
            } else {
                start = 0;
                end = 31;
                inc = 1;
            }
            uic->uicvr = uic->uicvcr & 0xFFFFFFFC;
            /*
             * Scan in the direction selected by UICVCR[0].  The termination
             * test must work for both the ascending (inc = 1) and the
             * descending (inc = -1) scan, so compare against end + inc
             * instead of using "i <= end".
             */
            for (i = start; i != end + inc; i += inc) {
                if (cr & (1 << i)) {
                    uic->uicvr += (i - start) * 512 * inc;
                    break;
                }
            }
        }
        LOG_UIC("Raise UIC critical interrupt - "
                "vector %08" PRIx32 "\n", uic->uicvr);
    } else {
        LOG_UIC("Lower UIC critical interrupt\n");
        qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_CINT]);
        uic->uicvr = 0x00000000;
    }
}
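
/* Worked example of the critical-vector computation above, using
 * illustrative register values: if UICVCR = 0x00001000 (vector base 0x1000,
 * bit 0 clear so the scan ascends from bit 0) and the only pending critical
 * source sits at status-register bit 3 (which, given the 1 << (31 - irq_num)
 * mapping in ppcuic_set_irq() below, is UIC input 28), then
 * UICVR = 0x1000 + (3 - 0) * 512 = 0x1600. */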
static void ppcuic_set_irq (void *opaque, int irq_num, int level)
{
    ppcuic_t *uic;
    uint32_t mask, sr;

    uic = opaque;
    mask = 1 << (31 - irq_num);
    LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32
            " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n",
            __func__, irq_num, level,
            uic->uicsr, mask, uic->uicsr & mask, level << irq_num);
    if (irq_num < 0 || irq_num > 31)
        return;
    sr = uic->uicsr;

    /* Update status register */
    if (uic->uictr & mask) {
        /* Edge sensitive interrupt */
        if (level == 1)
            uic->uicsr |= mask;
    } else {
        /* Level sensitive interrupt */
        if (level == 1) {
            uic->uicsr |= mask;
            uic->level |= mask;
        } else {
            uic->uicsr &= ~mask;
            uic->level &= ~mask;
        }
    }
    LOG_UIC("%s: irq %d level %d sr %" PRIx32 " => "
            "%08" PRIx32 "\n", __func__, irq_num, level, uic->uicsr, sr);
    if (sr != uic->uicsr)
        ppcuic_trigger_irq(uic);
}
static target_ulong dcr_read_uic (void *opaque, int dcrn)
{
    ppcuic_t *uic;
    target_ulong ret;

    uic = opaque;
    dcrn -= uic->dcr_base;
    switch (dcrn) {
    case DCR_UICSR:
    case DCR_UICSRS:
        ret = uic->uicsr;
        break;
    case DCR_UICER:
        ret = uic->uicer;
        break;
    case DCR_UICCR:
        ret = uic->uiccr;
        break;
    case DCR_UICPR:
        ret = uic->uicpr;
        break;
    case DCR_UICTR:
        ret = uic->uictr;
        break;
    case DCR_UICMSR:
        ret = uic->uicsr & uic->uicer;
        break;
    case DCR_UICVR:
        if (!uic->use_vectors)
            goto no_read;
        ret = uic->uicvr;
        break;
    case DCR_UICVCR:
        if (!uic->use_vectors)
            goto no_read;
        ret = uic->uicvcr;
        break;
    default:
    no_read:
        ret = 0x00000000;
        break;
    }

    return ret;
}
static void dcr_write_uic (void *opaque, int dcrn, target_ulong val)
{
    ppcuic_t *uic;

    uic = opaque;
    dcrn -= uic->dcr_base;
    LOG_UIC("%s: dcr %d val " TARGET_FMT_lx "\n", __func__, dcrn, val);
    switch (dcrn) {
    case DCR_UICSR:
        uic->uicsr &= ~val;
        uic->uicsr |= uic->level;
        ppcuic_trigger_irq(uic);
        break;
    case DCR_UICSRS:
        uic->uicsr |= val;
        ppcuic_trigger_irq(uic);
        break;
    case DCR_UICER:
        uic->uicer = val;
        ppcuic_trigger_irq(uic);
        break;
    case DCR_UICCR:
        uic->uiccr = val;
        ppcuic_trigger_irq(uic);
        break;
    case DCR_UICPR:
        uic->uicpr = val;
        break;
    case DCR_UICTR:
        uic->uictr = val;
        ppcuic_trigger_irq(uic);
        break;
    case DCR_UICMSR:
        break;
    case DCR_UICVR:
        break;
    case DCR_UICVCR:
        uic->uicvcr = val & 0xFFFFFFFD;
        ppcuic_trigger_irq(uic);
        break;
    }
}
static void ppcuic_reset (void *opaque)
{
    ppcuic_t *uic;

    uic = opaque;
    uic->uiccr = 0x00000000;
    uic->uicer = 0x00000000;
    uic->uicpr = 0x00000000;
    uic->uicsr = 0x00000000;
    uic->uictr = 0x00000000;
    if (uic->use_vectors) {
        uic->uicvcr = 0x00000000;
        uic->uicvr = 0x00000000;
    }
}
qemu_irq *ppcuic_init (CPUState *env, qemu_irq *irqs,
                       uint32_t dcr_base, int has_ssr, int has_vr)
{
    ppcuic_t *uic;
    int i;

    uic = qemu_mallocz(sizeof(ppcuic_t));
    uic->dcr_base = dcr_base;
    uic->irqs = irqs;
    if (has_vr)
        uic->use_vectors = 1;
    for (i = 0; i < DCR_UICMAX; i++) {
        ppc_dcr_register(env, dcr_base + i, uic,
                         &dcr_read_uic, &dcr_write_uic);
    }
    qemu_register_reset(ppcuic_reset, uic);

    return qemu_allocate_irqs(&ppcuic_set_irq, uic, UIC_MAX_IRQ);
}
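
/* Illustrative wiring (a sketch loosely modelled on the 4xx board code, not
 * copied from it): the caller passes an array indexed by PPCUIC_OUTPUT_INT
 * and PPCUIC_OUTPUT_CINT that points at the CPU's external and critical
 * interrupt inputs, and gets back the UIC_MAX_IRQ device-side input pins:
 *
 *     qemu_irq *irqs, *uic;
 *     irqs = qemu_mallocz(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
 *     irqs[PPCUIC_OUTPUT_INT] = ...;   // CPU external interrupt input
 *     irqs[PPCUIC_OUTPUT_CINT] = ...;  // CPU critical interrupt input
 *     uic = ppcuic_init(env, irqs, 0x0C0, 0, 1);
 *     // devices then raise uic[n] to assert UIC input n
 *
 * The DCR base 0x0C0 is an example value from the usual 405 setup and may
 * differ per SoC. */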
/*****************************************************************************/
/* SDRAM controller */
typedef struct ppc4xx_sdram_t ppc4xx_sdram_t;
struct ppc4xx_sdram_t {
    uint32_t addr;
    int nbanks;
    target_phys_addr_t ram_bases[4];
    target_phys_addr_t ram_sizes[4];
    uint32_t besr0;
    uint32_t besr1;
    uint32_t bear;
    uint32_t cfg;
    uint32_t status;
    uint32_t rtr;
    uint32_t pmit;
    uint32_t bcr[4];
    uint32_t tr;
    uint32_t ecccfg;
    uint32_t eccesr;
    qemu_irq irq;
};

enum {
    SDRAM0_CFGADDR = 0x010,
    SDRAM0_CFGDATA = 0x011,
};
/* XXX: TOFIX: some patches have made this code become inconsistent:
 *             there are type inconsistencies, mixing target_phys_addr_t,
 *             target_ulong and uint32_t.
 */
static uint32_t sdram_bcr (target_phys_addr_t ram_base,
                           target_phys_addr_t ram_size)
{
    uint32_t bcr;

    switch (ram_size) {
    case (4 * 1024 * 1024):
        bcr = 0x00000000;
        break;
    case (8 * 1024 * 1024):
        bcr = 0x00020000;
        break;
    case (16 * 1024 * 1024):
        bcr = 0x00040000;
        break;
    case (32 * 1024 * 1024):
        bcr = 0x00060000;
        break;
    case (64 * 1024 * 1024):
        bcr = 0x00080000;
        break;
    case (128 * 1024 * 1024):
        bcr = 0x000A0000;
        break;
    case (256 * 1024 * 1024):
        bcr = 0x000C0000;
        break;
    default:
        printf("%s: invalid RAM size " TARGET_FMT_plx "\n", __func__,
               ram_size);
        return 0x00000000;
    }
    bcr |= ram_base & 0xFF800000;
    bcr |= 1;

    return bcr;
}
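
/* Worked example of the BCR encoding above, with illustrative values: a
 * 64 MiB bank based at 0x00000000 yields bcr = 0x00080000 (size field)
 * | 0x00000000 (base bits from ram_base & 0xFF800000) | 1 (bank enable)
 * = 0x00080001, and sdram_size() below recovers
 * 4 MiB << ((0x00080001 >> 17) & 7) = 4 MiB << 4 = 64 MiB. */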
static inline target_phys_addr_t sdram_base(uint32_t bcr)
{
    return bcr & 0xFF800000;
}

static target_ulong sdram_size (uint32_t bcr)
{
    target_ulong size;
    int sh;

    sh = (bcr >> 17) & 0x7;
    if (sh == 7)
        size = -1;
    else
        size = (4 * 1024 * 1024) << sh;

    return size;
}
static void sdram_set_bcr (uint32_t *bcrp, uint32_t bcr, int enabled)
{
    if (*bcrp & 0x00000001) {
        /* Unmap RAM */
#ifdef DEBUG_SDRAM
        printf("%s: unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
               __func__, sdram_base(*bcrp), sdram_size(*bcrp));
#endif
        cpu_register_physical_memory(sdram_base(*bcrp), sdram_size(*bcrp),
                                     IO_MEM_UNASSIGNED);
    }
    *bcrp = bcr & 0xFFDEE001;
    if (enabled && (bcr & 0x00000001)) {
#ifdef DEBUG_SDRAM
        printf("%s: Map RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
               __func__, sdram_base(bcr), sdram_size(bcr));
#endif
        cpu_register_physical_memory(sdram_base(bcr), sdram_size(bcr),
                                     sdram_base(bcr) | IO_MEM_RAM);
    }
}
static void sdram_map_bcr (ppc4xx_sdram_t *sdram)
{
    int i;

    for (i = 0; i < sdram->nbanks; i++) {
        if (sdram->ram_sizes[i] != 0) {
            sdram_set_bcr(&sdram->bcr[i],
                          sdram_bcr(sdram->ram_bases[i], sdram->ram_sizes[i]),
                          1);
        } else {
            sdram_set_bcr(&sdram->bcr[i], 0x00000000, 0);
        }
    }
}
static void sdram_unmap_bcr (ppc4xx_sdram_t *sdram)
{
    int i;

    for (i = 0; i < sdram->nbanks; i++) {
#ifdef DEBUG_SDRAM
        printf("%s: Unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
               __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i]));
#endif
        cpu_register_physical_memory(sdram_base(sdram->bcr[i]),
                                     sdram_size(sdram->bcr[i]),
                                     IO_MEM_UNASSIGNED);
    }
}
static target_ulong dcr_read_sdram (void *opaque, int dcrn)
{
    ppc4xx_sdram_t *sdram;
    target_ulong ret;

    sdram = opaque;
    switch (dcrn) {
    case SDRAM0_CFGADDR:
        ret = sdram->addr;
        break;
    case SDRAM0_CFGDATA:
        switch (sdram->addr) {
        case 0x00: /* SDRAM_BESR0 */
            ret = sdram->besr0;
            break;
        case 0x08: /* SDRAM_BESR1 */
            ret = sdram->besr1;
            break;
        case 0x10: /* SDRAM_BEAR */
            ret = sdram->bear;
            break;
        case 0x20: /* SDRAM_CFG */
            ret = sdram->cfg;
            break;
        case 0x24: /* SDRAM_STATUS */
            ret = sdram->status;
            break;
        case 0x30: /* SDRAM_RTR */
            ret = sdram->rtr;
            break;
        case 0x34: /* SDRAM_PMIT */
            ret = sdram->pmit;
            break;
        case 0x40: /* SDRAM_B0CR */
            ret = sdram->bcr[0];
            break;
        case 0x44: /* SDRAM_B1CR */
            ret = sdram->bcr[1];
            break;
        case 0x48: /* SDRAM_B2CR */
            ret = sdram->bcr[2];
            break;
        case 0x4C: /* SDRAM_B3CR */
            ret = sdram->bcr[3];
            break;
        case 0x80: /* SDRAM_TR */
            ret = -1; /* ? */
            break;
        case 0x94: /* SDRAM_ECCCFG */
            ret = sdram->ecccfg;
            break;
        case 0x98: /* SDRAM_ECCESR */
            ret = sdram->eccesr;
            break;
        default: /* Error */
            ret = -1;
            break;
        }
        break;
    default:
        /* Avoid gcc warning */
        ret = 0x00000000;
        break;
    }

    return ret;
}
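
/* Illustrative guest access pattern (a sketch, not a transcript of real
 * firmware): the controller registers are reached indirectly, so the guest
 * first writes the register offset to SDRAM0_CFGADDR and then accesses
 * SDRAM0_CFGDATA, e.g.
 *
 *     mtdcr 0x010, rN    # SDRAM0_CFGADDR <- 0x20 (SDRAM_CFG)
 *     mfdcr rM, 0x011    # rM <- SDRAM0_CFGDATA, served by dcr_read_sdram()
 *
 * with the DCR numbers taken from the enum above. */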
static void dcr_write_sdram (void *opaque, int dcrn, target_ulong val)
{
    ppc4xx_sdram_t *sdram;

    sdram = opaque;
    switch (dcrn) {
    case SDRAM0_CFGADDR:
        sdram->addr = val;
        break;
    case SDRAM0_CFGDATA:
        switch (sdram->addr) {
        case 0x00: /* SDRAM_BESR0 */
            sdram->besr0 &= ~val;
            break;
        case 0x08: /* SDRAM_BESR1 */
            sdram->besr1 &= ~val;
            break;
        case 0x10: /* SDRAM_BEAR */
            sdram->bear = val;
            break;
        case 0x20: /* SDRAM_CFG */
            val &= 0xFFE00000;
            if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) {
#ifdef DEBUG_SDRAM
                printf("%s: enable SDRAM controller\n", __func__);
#endif
                /* validate all RAM mappings */
                sdram_map_bcr(sdram);
                sdram->status &= ~0x80000000;
            } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) {
#ifdef DEBUG_SDRAM
                printf("%s: disable SDRAM controller\n", __func__);
#endif
                /* invalidate all RAM mappings */
                sdram_unmap_bcr(sdram);
                sdram->status |= 0x80000000;
            }
            if (!(sdram->cfg & 0x40000000) && (val & 0x40000000))
                sdram->status |= 0x40000000;
            else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000))
                sdram->status &= ~0x40000000;
            sdram->cfg = val;
            break;
        case 0x24: /* SDRAM_STATUS */
            /* Read-only register */
            break;
        case 0x30: /* SDRAM_RTR */
            sdram->rtr = val & 0x3FF80000;
            break;
        case 0x34: /* SDRAM_PMIT */
            sdram->pmit = (val & 0xF8000000) | 0x07C00000;
            break;
        case 0x40: /* SDRAM_B0CR */
            sdram_set_bcr(&sdram->bcr[0], val, sdram->cfg & 0x80000000);
            break;
        case 0x44: /* SDRAM_B1CR */
            sdram_set_bcr(&sdram->bcr[1], val, sdram->cfg & 0x80000000);
            break;
        case 0x48: /* SDRAM_B2CR */
            sdram_set_bcr(&sdram->bcr[2], val, sdram->cfg & 0x80000000);
            break;
        case 0x4C: /* SDRAM_B3CR */
            sdram_set_bcr(&sdram->bcr[3], val, sdram->cfg & 0x80000000);
            break;
        case 0x80: /* SDRAM_TR */
            sdram->tr = val & 0x018FC01F;
            break;
        case 0x94: /* SDRAM_ECCCFG */
            sdram->ecccfg = val & 0x00F00000;
            break;
        case 0x98: /* SDRAM_ECCESR */
            val &= 0xFFF0F000;
            if (sdram->eccesr == 0 && val != 0)
                qemu_irq_raise(sdram->irq);
            else if (sdram->eccesr != 0 && val == 0)
                qemu_irq_lower(sdram->irq);
            sdram->eccesr = val;
            break;
        default: /* Error */
            break;
        }
        break;
    }
}
static void sdram_reset (void *opaque)
{
    ppc4xx_sdram_t *sdram;

    sdram = opaque;
    sdram->addr = 0x00000000;
    sdram->bear = 0x00000000;
    sdram->besr0 = 0x00000000; /* No error */
    sdram->besr1 = 0x00000000; /* No error */
    sdram->cfg = 0x00000000;
    sdram->ecccfg = 0x00000000; /* No ECC */
    sdram->eccesr = 0x00000000; /* No error */
    sdram->pmit = 0x07C00000;
    sdram->rtr = 0x05F00000;
    sdram->tr = 0x00854009;
    /* We pre-initialize RAM banks */
    sdram->status = 0x00000000;
    sdram->cfg = 0x00800000;
    sdram_unmap_bcr(sdram);
}
void ppc4xx_sdram_init (CPUState *env, qemu_irq irq, int nbanks,
                        target_phys_addr_t *ram_bases,
                        target_phys_addr_t *ram_sizes,
                        int do_init)
{
    ppc4xx_sdram_t *sdram;

    sdram = qemu_mallocz(sizeof(ppc4xx_sdram_t));
    sdram->irq = irq;
    sdram->nbanks = nbanks;
    memset(sdram->ram_bases, 0, 4 * sizeof(target_phys_addr_t));
    memcpy(sdram->ram_bases, ram_bases,
           nbanks * sizeof(target_phys_addr_t));
    memset(sdram->ram_sizes, 0, 4 * sizeof(target_phys_addr_t));
    memcpy(sdram->ram_sizes, ram_sizes,
           nbanks * sizeof(target_phys_addr_t));
    qemu_register_reset(&sdram_reset, sdram);
    ppc_dcr_register(env, SDRAM0_CFGADDR,
                     sdram, &dcr_read_sdram, &dcr_write_sdram);
    ppc_dcr_register(env, SDRAM0_CFGDATA,
                     sdram, &dcr_read_sdram, &dcr_write_sdram);
    if (do_init)
        sdram_map_bcr(sdram);
}
/* Fill in consecutive SDRAM banks with 'ram_size' bytes of memory.
 *
 * sdram_bank_sizes[] must be 0-terminated.
 *
 * The 4xx SDRAM controller supports a small number of banks, and each bank
 * must be one of a small set of sizes. The number of banks and the supported
 * sizes varies by SoC. */
ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
                               target_phys_addr_t ram_bases[],
                               target_phys_addr_t ram_sizes[],
                               const unsigned int sdram_bank_sizes[])
{
    ram_addr_t size_left = ram_size;
    int i;
    int j;

    for (i = 0; i < nr_banks; i++) {
        for (j = 0; sdram_bank_sizes[j] != 0; j++) {
            unsigned int bank_size = sdram_bank_sizes[j];

            if (bank_size <= size_left) {
                ram_bases[i] = qemu_ram_alloc(bank_size);
                ram_sizes[i] = bank_size;
                size_left -= bank_size;
                break;
            }
        }

        if (!size_left) {
            /* No need to use the remaining banks. */
            break;
        }
    }

    ram_size -= size_left;
    /* Warn only when some of the requested memory could not be placed. */
    if (size_left)
        printf("Truncating memory to %d MiB to fit SDRAM controller limits.\n",
               (int)(ram_size >> 20));

    return ram_size;
}
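
/* Illustrative usage (a sketch in the style of the 4xx board models, with
 * example names and sizes rather than values copied from one of them):
 *
 *     static const unsigned int sdram_bank_sizes[] = {
 *         256 << 20, 128 << 20, 64 << 20, 32 << 20, 16 << 20, 8 << 20, 0
 *     };
 *     target_phys_addr_t ram_bases[2], ram_sizes[2];
 *
 *     ram_size = ppc4xx_sdram_adjust(ram_size, 2, ram_bases, ram_sizes,
 *                                    sdram_bank_sizes);
 *     ppc4xx_sdram_init(env, pic[14], 2, ram_bases, ram_sizes, 1);
 *
 * Here the two-bank limit, the size table and the UIC input used for the
 * SDRAM ECC interrupt (pic[14]) are illustrative; each board picks its own. */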