hw/misc/allwinner-r40-dramc.c
/*
 * Allwinner R40 SDRAM Controller emulation
 *
 * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "exec/address-spaces.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "hw/misc/allwinner-r40-dramc.h"
#include "trace.h"
#define REG_INDEX(offset)    (offset / sizeof(uint32_t))
/* DRAMCOM register offsets */
enum {
    REG_DRAMCOM_CR = 0x0000, /* Control Register */
};

/* DRAMCOM register flags */
enum {
    REG_DRAMCOM_CR_DUAL_RANK = (1 << 0),
};
/* DRAMCTL register offsets */
enum {
    REG_DRAMCTL_PIR   = 0x0000, /* PHY Initialization Register */
    REG_DRAMCTL_PGSR  = 0x0010, /* PHY General Status Register */
    REG_DRAMCTL_STATR = 0x0018, /* Status Register */
    REG_DRAMCTL_PGCR  = 0x0100, /* PHY general configuration registers */
};

/* DRAMCTL register flags */
enum {
    REG_DRAMCTL_PGSR_INITDONE = (1 << 0),
    REG_DRAMCTL_PGSR_READ_TIMEOUT = (1 << 13),
    REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT = (1 << 25),
};

enum {
    REG_DRAMCTL_STATR_ACTIVE = (1 << 0),
};
#define DRAM_MAX_ROW_BITS 16
#define DRAM_MAX_COL_BITS 13 /* 8192 */
#define DRAM_MAX_BANK      3
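/*
 * Backing store for the "DRAMCELLS" detect region below: one 64-bit cell per
 * (row, bank, column) bit index. While the guest probes the DRAM geometry,
 * accesses land in these cells rather than real RAM, so aliased addresses
 * collapse onto the same cell (see address_to_autodetect_cells()).
 */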
static uint64_t dram_autodetect_cells[DRAM_MAX_ROW_BITS]
                                     [DRAM_MAX_BANK]
                                     [DRAM_MAX_COL_BITS];
struct VirtualDDRChip {
    uint32_t    ram_size;
    uint8_t     bank_bits;
    uint8_t     row_bits;
    uint8_t     col_bits;
};
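/*
 * Note: for each entry below, bank_bits + row_bits + col_bits equals
 * log2(ram_size in bytes), e.g. 3 + 12 + 13 = 28 bits for the 256 MiB chip.
 */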
/*
 * Only power-of-2 RAM sizes from 256 MiB up to 2048 MiB are handled here;
 * 2 GiB itself is not supported because it would require the dual-rank
 * feature.
 */
static const struct VirtualDDRChip dummy_ddr_chips[] = {
    {
        .ram_size  = 256,
        .bank_bits = 3,
        .row_bits  = 12,
        .col_bits  = 13,
    }, {
        .ram_size  = 512,
        .bank_bits = 3,
        .row_bits  = 13,
        .col_bits  = 13,
    }, {
        .ram_size  = 1024,
        .bank_bits = 3,
        .row_bits  = 14,
        .col_bits  = 13,
    }, {
        /* zero ram_size terminates the list (see get_match_ddr()) */
        0
    }
};
static const struct VirtualDDRChip *get_match_ddr(uint32_t ram_size)
{
    const struct VirtualDDRChip *ddr;

    for (ddr = &dummy_ddr_chips[0]; ddr->ram_size; ddr++) {
        if (ddr->ram_size == ram_size) {
            return ddr;
        }
    }

    return NULL;
}
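/*
 * Split a guest offset into row/bank/column fields using the geometry the
 * guest programmed (set_row_bits/set_bank_bits/set_col_bits), then pick the
 * autodetect cell indexed by the highest set bit of each field. Addresses
 * that alias under the real chip's geometry therefore map to the same cell,
 * which is what the guest's size-detection loop relies on.
 */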
static uint64_t *address_to_autodetect_cells(AwR40DramCtlState *s,
                                             const struct VirtualDDRChip *ddr,
                                             uint32_t offset)
{
    int row_index = 0, bank_index = 0, col_index = 0;
    uint32_t row_addr, bank_addr, col_addr;

    row_addr = extract32(offset, s->set_col_bits + s->set_bank_bits,
                         s->set_row_bits);
    bank_addr = extract32(offset, s->set_col_bits, s->set_bank_bits);
    col_addr = extract32(offset, 0, s->set_col_bits);

    for (int i = 0; i < ddr->row_bits; i++) {
        if (row_addr & BIT(i)) {
            row_index = i;
        }
    }

    for (int i = 0; i < ddr->bank_bits; i++) {
        if (bank_addr & BIT(i)) {
            bank_index = i;
        }
    }

    for (int i = 0; i < ddr->col_bits; i++) {
        if (col_addr & BIT(i)) {
            col_index = i;
        }
    }

    trace_allwinner_r40_dramc_offset_to_cell(offset, row_index, bank_index,
                                             col_index);
    return &dram_autodetect_cells[row_index][bank_index][col_index];
}
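/*
 * Record the geometry the guest just programmed and enable the overlapping
 * "DRAMCELLS" region only while that geometry differs from the emulated
 * chip's real one, i.e. only during the guest's auto-detection phase.
 */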
static void allwinner_r40_dramc_map_rows(AwR40DramCtlState *s, uint8_t row_bits,
                                         uint8_t bank_bits, uint8_t col_bits)
{
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);
    bool enable_detect_cells;

    trace_allwinner_r40_dramc_map_rows(row_bits, bank_bits, col_bits);

    if (!ddr) {
        return;
    }

    s->set_row_bits = row_bits;
    s->set_bank_bits = bank_bits;
    s->set_col_bits = col_bits;

    enable_detect_cells = ddr->bank_bits != bank_bits
                       || ddr->row_bits != row_bits
                       || ddr->col_bits != col_bits;

    if (enable_detect_cells) {
        trace_allwinner_r40_dramc_detect_cells_enable();
    } else {
        trace_allwinner_r40_dramc_detect_cells_disable();
    }

    memory_region_set_enabled(&s->detect_cells, enable_detect_cells);
}
static uint64_t allwinner_r40_dramcom_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMCOM_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramcom_read(offset, s->dramcom[idx], size);
    return s->dramcom[idx];
}
static void allwinner_r40_dramcom_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramcom_write(offset, val, size);

    if (idx >= AW_R40_DRAMCOM_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    switch (offset) {
    case REG_DRAMCOM_CR:   /* Control Register */
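        /*
         * As decoded below: CR[7:4] + 1 = row bits, CR[2] + 2 = bank bits
         * (4 or 8 banks), CR[11:8] + 3 = column bits. Only single-rank
         * configurations are remapped here.
         */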
        if (!(val & REG_DRAMCOM_CR_DUAL_RANK)) {
            allwinner_r40_dramc_map_rows(s, ((val >> 4) & 0xf) + 1,
                                         ((val >> 2) & 0x1) + 2,
                                         (((val >> 8) & 0xf) + 3));
        }
        break;
    }

    s->dramcom[idx] = (uint32_t) val;
}
static uint64_t allwinner_r40_dramctl_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMCTL_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramctl_read(offset, s->dramctl[idx], size);
    return s->dramctl[idx];
}
static void allwinner_r40_dramctl_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramctl_write(offset, val, size);

    if (idx >= AW_R40_DRAMCTL_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    switch (offset) {
    case REG_DRAMCTL_PIR:    /* PHY Initialization Register */
        s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |= REG_DRAMCTL_PGSR_INITDONE;
        s->dramctl[REG_INDEX(REG_DRAMCTL_STATR)] |= REG_DRAMCTL_STATR_ACTIVE;
        break;
    }

    s->dramctl[idx] = (uint32_t) val;
}
static uint64_t allwinner_r40_dramphy_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMPHY_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramphy_read(offset, s->dramphy[idx], size);
    return s->dramphy[idx];
}
static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramphy_write(offset, val, size);

    if (idx >= AW_R40_DRAMPHY_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    s->dramphy[idx] = (uint32_t) val;
}
static const MemoryRegionOps allwinner_r40_dramcom_ops = {
    .read = allwinner_r40_dramcom_read,
    .write = allwinner_r40_dramcom_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

static const MemoryRegionOps allwinner_r40_dramctl_ops = {
    .read = allwinner_r40_dramctl_read,
    .write = allwinner_r40_dramctl_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

static const MemoryRegionOps allwinner_r40_dramphy_ops = {
    .read = allwinner_r40_dramphy_read,
    .write = allwinner_r40_dramphy_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};
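/*
 * Handlers for the "DRAMCELLS" region that overlaps guest RAM while the
 * geometry is being probed: accesses are redirected into the
 * dram_autodetect_cells[] array instead of real memory.
 */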
static uint64_t allwinner_r40_detect_read(void *opaque, hwaddr offset,
                                          unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);
    uint64_t data = 0;

    if (ddr) {
        data = *address_to_autodetect_cells(s, ddr, (uint32_t)offset);
    }

    trace_allwinner_r40_dramc_detect_cell_read(offset, data);
    return data;
}
static void allwinner_r40_detect_write(void *opaque, hwaddr offset,
                                       uint64_t data, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);

    if (ddr) {
        uint64_t *cell = address_to_autodetect_cells(s, ddr, (uint32_t)offset);
        trace_allwinner_r40_dramc_detect_cell_write(offset, data);
        *cell = data;
    }
}
static const MemoryRegionOps allwinner_r40_detect_ops = {
    .read = allwinner_r40_detect_read,
    .write = allwinner_r40_detect_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};
/*
 * mctl_r40_detect_rank_count in u-boot writes to the high 1 GiB of DDR to
 * detect whether the board supports dual rank or not. Create a virtual
 * memory region if the board's ram_size is less than or equal to 1 GiB, and
 * set the read-timeout flag of REG_DRAMCTL_PGSR when the guest touches this
 * high DRAM area.
 */
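/*
 * Roughly, as this model assumes, the guest-side sequence is: enable the
 * read timeout via REG_DRAMCTL_PGCR bit 25, read from the would-be second
 * rank above 1 GiB, then check REG_DRAMCTL_PGSR bit 13 to conclude that only
 * a single rank is present.
 */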
static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset,
                                                   unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    uint32_t reg;

    reg = s->dramctl[REG_INDEX(REG_DRAMCTL_PGCR)];
    if (reg & REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT) { /* read timeout enabled */
        /*
         * This model only supports one rank, so mark READ_TIMEOUT when the
         * guest tries to read from the second rank.
         */
        s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)]
                                |= REG_DRAMCTL_PGSR_READ_TIMEOUT;
    }

    return 0;
}
static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = {
    .read = allwinner_r40_dualrank_detect_read,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};
static void allwinner_r40_dramc_reset(DeviceState *dev)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(dev);

    /* Set default values for registers */
    memset(&s->dramcom, 0, sizeof(s->dramcom));
    memset(&s->dramctl, 0, sizeof(s->dramctl));
    memset(&s->dramphy, 0, sizeof(s->dramphy));
}
static void allwinner_r40_dramc_realize(DeviceState *dev, Error **errp)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(dev);

    if (!get_match_ddr(s->ram_size)) {
        error_report("%s: ram-size %u MiB is not supported",
                     __func__, s->ram_size);
        exit(1);
    }
    /* The R40 supports at most 2 GiB of DRAM, but we only model up to 1 GiB. */
    memory_region_init_io(&s->detect_cells, OBJECT(s),
                          &allwinner_r40_detect_ops, s,
                          "DRAMCELLS", 1 * GiB);
    memory_region_add_subregion_overlap(get_system_memory(), s->ram_addr,
                                        &s->detect_cells, 10);
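    /*
     * Priority 10 lets this region shadow the RAM mapped at ram_addr, but it
     * stays disabled until allwinner_r40_dramc_map_rows() sees a geometry
     * mismatch.
     */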
    memory_region_set_enabled(&s->detect_cells, false);
    /*
     * We only support DRAM sizes up to 1 GiB for now, so prepare a high
     * memory page just above 1 GiB for dual-rank detection.
     */
    memory_region_init_io(&s->dram_high, OBJECT(s),
                          &allwinner_r40_dualrank_detect_ops, s,
                          "DRAMHIGH", KiB);
    memory_region_add_subregion(get_system_memory(), s->ram_addr + GiB,
                                &s->dram_high);
}
static void allwinner_r40_dramc_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    AwR40DramCtlState *s = AW_R40_DRAMC(obj);

    /* DRAMCOM registers, index 0 */
    memory_region_init_io(&s->dramcom_iomem, OBJECT(s),
                          &allwinner_r40_dramcom_ops, s,
                          "DRAMCOM", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramcom_iomem);

    /* DRAMCTL registers, index 1 */
    memory_region_init_io(&s->dramctl_iomem, OBJECT(s),
                          &allwinner_r40_dramctl_ops, s,
                          "DRAMCTL", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramctl_iomem);

    /* DRAMPHY registers, index 2 */
    memory_region_init_io(&s->dramphy_iomem, OBJECT(s),
                          &allwinner_r40_dramphy_ops, s,
                          "DRAMPHY", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramphy_iomem);
}
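/*
 * A minimal sketch of how an SoC model might wire this device up; the names
 * and addresses here are only illustrative, the real wiring lives in the R40
 * SoC code:
 *
 *   object_initialize_child(obj, "dramc", &s->dramc, TYPE_AW_R40_DRAMC);
 *   object_property_set_uint(OBJECT(&s->dramc), "ram-addr", 0x40000000,
 *                            &error_abort);
 *   object_property_set_uint(OBJECT(&s->dramc), "ram-size", 1024,
 *                            &error_abort);
 *   sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_abort);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 0, dramcom_base);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 1, dramctl_base);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, dramphy_base);
 */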
static Property allwinner_r40_dramc_properties[] = {
    DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0),
    DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */
    DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription allwinner_r40_dramc_vmstate = {
    .name = "allwinner-r40-dramc",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState,
                             AW_R40_DRAMCOM_REGS_NUM),
        VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState,
                             AW_R40_DRAMCTL_REGS_NUM),
        VMSTATE_UINT32_ARRAY(dramphy, AwR40DramCtlState,
                             AW_R40_DRAMPHY_REGS_NUM),
        VMSTATE_END_OF_LIST()
    }
};
static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = allwinner_r40_dramc_reset;
    dc->vmsd = &allwinner_r40_dramc_vmstate;
    dc->realize = allwinner_r40_dramc_realize;
    device_class_set_props(dc, allwinner_r40_dramc_properties);
}
static const TypeInfo allwinner_r40_dramc_info = {
    .name = TYPE_AW_R40_DRAMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = allwinner_r40_dramc_init,
    .instance_size = sizeof(AwR40DramCtlState),
    .class_init = allwinner_r40_dramc_class_init,
};
static void allwinner_r40_dramc_register(void)
{
    type_register_static(&allwinner_r40_dramc_info);
}

type_init(allwinner_r40_dramc_register)