qemu/ar7.git: hw/dma/sifive_pdma.c
/*
 * SiFive Platform DMA emulation
 *
 * Copyright (c) 2020 Wind River Systems, Inc.
 *
 * Author:
 *   Bin Meng <bin.meng@windriver.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "hw/dma/sifive_pdma.h"

#define DMA_CONTROL         0x000
#define CONTROL_CLAIM       BIT(0)
#define CONTROL_RUN         BIT(1)
#define CONTROL_DONE_IE     BIT(14)
#define CONTROL_ERR_IE      BIT(15)
#define CONTROL_DONE        BIT(30)
#define CONTROL_ERR         BIT(31)

#define DMA_NEXT_CONFIG     0x004
#define CONFIG_REPEAT       BIT(2)
#define CONFIG_ORDER        BIT(3)
#define CONFIG_WRSZ_SHIFT   24
#define CONFIG_RDSZ_SHIFT   28
#define CONFIG_SZ_MASK      0xf

#define DMA_NEXT_BYTES      0x008
#define DMA_NEXT_DST        0x010
#define DMA_NEXT_SRC        0x018
#define DMA_EXEC_CONFIG     0x104
#define DMA_EXEC_BYTES      0x108
#define DMA_EXEC_DST        0x110
#define DMA_EXEC_SRC        0x118

/*
 * FU540/FU740 docs are incorrect with NextConfig.wsize/rsize reset values.
 * The reset values tested on Unleashed/Unmatched boards are 6 instead of 0.
 */
#define CONFIG_WRSZ_DEFAULT 6
#define CONFIG_RDSZ_DEFAULT 6

enum dma_chan_state {
    DMA_CHAN_STATE_IDLE,
    DMA_CHAN_STATE_STARTED,
    DMA_CHAN_STATE_ERROR,
    DMA_CHAN_STATE_DONE
};
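
/*
 * Note: this enum is internal bookkeeping for the model; the guest observes
 * transfer progress through the done/error bits of the Control register
 * rather than through this state field.
 */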

static void sifive_pdma_run(SiFivePDMAState *s, int ch)
{
    uint64_t bytes = s->chan[ch].next_bytes;
    uint64_t dst = s->chan[ch].next_dst;
    uint64_t src = s->chan[ch].next_src;
    uint32_t config = s->chan[ch].next_config;
    int wsize, rsize, size, remainder;
    uint8_t buf[64];
    int n;

    /* do nothing if bytes to transfer is zero */
    if (!bytes) {
        goto done;
    }

    /*
     * The manual does not describe how the hardware behaves when
     * config.wsize and config.rsize are given different values.
     * A common case is memory to memory DMA, and in this case they
     * are normally the same. Abort if this expectation fails.
     */
    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
    if (wsize != rsize) {
        goto error;
    }

    /*
     * Calculate the transaction size
     *
     * size field is base 2 logarithm of DMA transaction size,
     * but there is an upper limit of 64 bytes per transaction.
     */
    size = wsize;
    if (size > 6) {
        size = 6;
    }
    size = 1 << size;
    remainder = bytes % size;
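
    /*
     * Example: with the reset value wsize = 6 the transaction size is
     * 1 << 6 = 64 bytes, so a 100-byte request is copied as one 64-byte
     * transaction followed by a 36-byte remainder below.
     */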

    /* indicate a DMA transfer is started */
    s->chan[ch].state = DMA_CHAN_STATE_STARTED;
    s->chan[ch].control &= ~CONTROL_DONE;
    s->chan[ch].control &= ~CONTROL_ERR;

    /* load the next_ registers into their exec_ counterparts */
    s->chan[ch].exec_config = config;
    s->chan[ch].exec_bytes = bytes;
    s->chan[ch].exec_dst = dst;
    s->chan[ch].exec_src = src;

    for (n = 0; n < bytes / size; n++) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
        s->chan[ch].exec_src += size;
        s->chan[ch].exec_dst += size;
        s->chan[ch].exec_bytes -= size;
    }

    if (remainder) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder);
        s->chan[ch].exec_src += remainder;
        s->chan[ch].exec_dst += remainder;
        s->chan[ch].exec_bytes -= remainder;
    }

    /* reload exec_ registers if repeat is required */
    if (s->chan[ch].next_config & CONFIG_REPEAT) {
        s->chan[ch].exec_bytes = bytes;
        s->chan[ch].exec_dst = dst;
        s->chan[ch].exec_src = src;
    }

done:
    /* indicate a DMA transfer is done */
    s->chan[ch].state = DMA_CHAN_STATE_DONE;
    s->chan[ch].control &= ~CONTROL_RUN;
    s->chan[ch].control |= CONTROL_DONE;
    return;

error:
    s->chan[ch].state = DMA_CHAN_STATE_ERROR;
    s->chan[ch].control |= CONTROL_ERR;
    return;
}

static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
{
    bool done_ie, err_ie;

    done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
    err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);
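
    /*
     * Each channel drives a pair of interrupt lines: irq[ch * 2] reports
     * transfer completion and irq[ch * 2 + 1] reports errors, each raised
     * only when the corresponding interrupt-enable bit in the Control
     * register is set.
     */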
    if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
        qemu_irq_raise(s->irq[ch * 2]);
    } else {
        qemu_irq_lower(s->irq[ch * 2]);
    }

    if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
        qemu_irq_raise(s->irq[ch * 2 + 1]);
    } else {
        qemu_irq_lower(s->irq[ch * 2 + 1]);
    }

    s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}

static uint64_t sifive_pdma_readq(SiFivePDMAState *s, int ch, hwaddr offset)
{
    uint64_t val = 0;

    offset &= 0xfff;
    switch (offset) {
    case DMA_NEXT_BYTES:
        val = s->chan[ch].next_bytes;
        break;
    case DMA_NEXT_DST:
        val = s->chan[ch].next_dst;
        break;
    case DMA_NEXT_SRC:
        val = s->chan[ch].next_src;
        break;
    case DMA_EXEC_BYTES:
        val = s->chan[ch].exec_bytes;
        break;
    case DMA_EXEC_DST:
        val = s->chan[ch].exec_dst;
        break;
    case DMA_EXEC_SRC:
        val = s->chan[ch].exec_src;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}

static uint32_t sifive_pdma_readl(SiFivePDMAState *s, int ch, hwaddr offset)
{
    uint32_t val = 0;

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        val = s->chan[ch].control;
        break;
    case DMA_NEXT_CONFIG:
        val = s->chan[ch].next_config;
        break;
    case DMA_NEXT_BYTES:
        val = extract64(s->chan[ch].next_bytes, 0, 32);
        break;
    case DMA_NEXT_BYTES + 4:
        val = extract64(s->chan[ch].next_bytes, 32, 32);
        break;
    case DMA_NEXT_DST:
        val = extract64(s->chan[ch].next_dst, 0, 32);
        break;
    case DMA_NEXT_DST + 4:
        val = extract64(s->chan[ch].next_dst, 32, 32);
        break;
    case DMA_NEXT_SRC:
        val = extract64(s->chan[ch].next_src, 0, 32);
        break;
    case DMA_NEXT_SRC + 4:
        val = extract64(s->chan[ch].next_src, 32, 32);
        break;
    case DMA_EXEC_CONFIG:
        val = s->chan[ch].exec_config;
        break;
    case DMA_EXEC_BYTES:
        val = extract64(s->chan[ch].exec_bytes, 0, 32);
        break;
    case DMA_EXEC_BYTES + 4:
        val = extract64(s->chan[ch].exec_bytes, 32, 32);
        break;
    case DMA_EXEC_DST:
        val = extract64(s->chan[ch].exec_dst, 0, 32);
        break;
    case DMA_EXEC_DST + 4:
        val = extract64(s->chan[ch].exec_dst, 32, 32);
        break;
    case DMA_EXEC_SRC:
        val = extract64(s->chan[ch].exec_src, 0, 32);
        break;
    case DMA_EXEC_SRC + 4:
        val = extract64(s->chan[ch].exec_src, 32, 32);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}
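
/*
 * Each channel's register bank occupies its own window in the MMIO region
 * (hence the "offset &= 0xfff" masking in the helpers above), and
 * SIFIVE_PDMA_CHAN_NO() derives the channel index from the higher offset
 * bits. Accesses are then dispatched by width: 64-bit accesses go to the
 * readq/writeq helpers, 32-bit accesses to readl/writel.
 */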
static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);
    uint64_t val = 0;

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return 0;
    }

    switch (size) {
    case 8:
        val = sifive_pdma_readq(s, ch, offset);
        break;
    case 4:
        val = sifive_pdma_readl(s, ch, offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid read size %u to PDMA\n",
                      __func__, size);
        return 0;
    }

    return val;
}

static void sifive_pdma_writeq(SiFivePDMAState *s, int ch,
                               hwaddr offset, uint64_t value)
{
    offset &= 0xfff;
    switch (offset) {
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes = value;
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = value;
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = value;
        break;
    case DMA_EXEC_BYTES:
    case DMA_EXEC_DST:
    case DMA_EXEC_SRC:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

static void sifive_pdma_writel(SiFivePDMAState *s, int ch,
                               hwaddr offset, uint32_t value)
{
    bool claimed, run;

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        claimed = !!(s->chan[ch].control & CONTROL_CLAIM);
        run = !!(s->chan[ch].control & CONTROL_RUN);

        if (!claimed && (value & CONTROL_CLAIM)) {
            /* reset Next* registers */
            s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) |
                                      (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT);
            s->chan[ch].next_bytes = 0;
            s->chan[ch].next_dst = 0;
            s->chan[ch].next_src = 0;
        }

        /* claim bit can only be cleared when run is low */
        if (run && !(value & CONTROL_CLAIM)) {
            value |= CONTROL_CLAIM;
        }

        s->chan[ch].control = value;

        /*
         * If the channel was not claimed before the run bit is set,
         * or if the channel is disclaimed while run was low,
         * DMA won't run.
         */
        if (!claimed || (!run && !(value & CONTROL_CLAIM))) {
            s->chan[ch].control &= ~CONTROL_RUN;
            return;
        }
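
        /*
         * The transfer is modeled as completing instantaneously:
         * sifive_pdma_run() copies all of the data and sets the done or
         * error bits before this register write returns to the guest.
         */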
        if (value & CONTROL_RUN) {
            sifive_pdma_run(s, ch);
        }

        sifive_pdma_update_irq(s, ch);
        break;
    case DMA_NEXT_CONFIG:
        s->chan[ch].next_config = value;
        break;
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes =
            deposit64(s->chan[ch].next_bytes, 0, 32, value);
        break;
    case DMA_NEXT_BYTES + 4:
        s->chan[ch].next_bytes =
            deposit64(s->chan[ch].next_bytes, 32, 32, value);
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 0, 32, value);
        break;
    case DMA_NEXT_DST + 4:
        s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 32, 32, value);
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 0, 32, value);
        break;
    case DMA_NEXT_SRC + 4:
        s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 32, 32, value);
        break;
    case DMA_EXEC_CONFIG:
    case DMA_EXEC_BYTES:
    case DMA_EXEC_BYTES + 4:
    case DMA_EXEC_DST:
    case DMA_EXEC_DST + 4:
    case DMA_EXEC_SRC:
    case DMA_EXEC_SRC + 4:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

static void sifive_pdma_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return;
    }

    switch (size) {
    case 8:
        sifive_pdma_writeq(s, ch, offset, value);
        break;
    case 4:
        sifive_pdma_writel(s, ch, offset, (uint32_t) value);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid write size %u to PDMA\n",
                      __func__, size);
        break;
    }
}

static const MemoryRegionOps sifive_pdma_ops = {
    .read = sifive_pdma_read,
    .write = sifive_pdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* there are 32-bit and 64-bit wide registers */
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    }
};

static void sifive_pdma_realize(DeviceState *dev, Error **errp)
{
    SiFivePDMAState *s = SIFIVE_PDMA(dev);
    int i;

    memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
                          TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);

    for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }
}

static void sifive_pdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "SiFive Platform DMA controller";
    dc->realize = sifive_pdma_realize;
}

static const TypeInfo sifive_pdma_info = {
    .name          = TYPE_SIFIVE_PDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SiFivePDMAState),
    .class_init    = sifive_pdma_class_init,
};

static void sifive_pdma_register_types(void)
{
    type_register_static(&sifive_pdma_info);
}

type_init(sifive_pdma_register_types)