/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 /* This must come before any other includes. */
22 #include "defs.h"
24 #include <stdlib.h>
26 #include "sim-main.h"
27 #include "devices.h"
28 #include "hw-device.h"
29 #include "dv-bfin_dma.h"
30 #include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
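
/* Concretely (see bfin_dma_io_write_buffer below), a channel only schedules
   its own event callback when it is a peripheral channel (CTYPE clear in
   PERIPHERAL_MAP) or a memory-write channel (WNR set in CONFIG).  So in an
   MDMA pair only the memory-write side actively pumps data, pulling from its
   peer via hw_dma_read_buffer, while the source side is serviced passively
   through bfin_dma_dma_read_buffer.  At least, that is how the sim models
   it.  */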

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
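
/* Each MMR above is assumed to occupy a 4-byte slot (BFIN_MMR_16 presumably
   pads its 16-bit register out to 32 bits), which is why mmr_name() can
   simply divide the register offset by 4.  For example, mmr_offset(config)
   works out to 0x08 -- next_desc_ptr sits at 0x00 and start_addr at 0x04 --
   so mmr_name(0x08) yields "CONFIG".  */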

static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}

static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
          /* Fall through to pick up the low half of the pointer.  */
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
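
/* An illustration of what the descriptor parsing above consumes, based on
   the stores[] ordering (not quoted from the hardware manual): with
   DMAFLOW_SMALL and NDSIZE = 4, the 16-bit words fetched from NEXT_DESC_PTR
   are

     word 0: NDPL        (low half of the next descriptor pointer)
     word 1: SAL         (low half of the start address)
     word 2: SAH         (high half of the start address)
     word 3: DMA_CONFIG

   DMAFLOW_LARGE descriptors carry NDPH as their second word, so the
   equivalent example there needs NDSIZE = 5.  */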

static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}
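
/* Worked example of the 2D fixup above (illustrative numbers only): the
   transfer loop has already added x_modify after the last element of the
   row, so subtracting x_modify and adding y_modify lands curr_addr on the
   first element of the next row.  With 16-bit elements, x_modify = 2 and
   y_modify = 2, the net change is zero and rows are packed back to back;
   a larger y_modify leaves a gap between rows.  */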

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
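
/* Pacing note for the reschedule above: if any data moved this pass
   (ret != 0), the callback re-arms for the next cycle and keeps streaming;
   if the peer stalled (ret == 0), it backs off for 5000 cycles before
   polling again.  These look like simulator pacing choices rather than
   modeled hardware timing.  */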

static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}
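
/* Putting the handlers together, a guest kicks off a transfer with a plain
   sequence of MMR stores (a hypothetical memory-write example; offsets
   follow the struct layout above):

     START_ADDR  <- destination buffer
     X_COUNT     <- number of elements
     X_MODIFY    <- element stride in bytes (must equal the WDSIZE element
                    size in this model)
     CONFIG      <- DMAEN | WNR | WDSIZE_16 | DI_EN

   The CONFIG store is the trigger: it sets DMA_RUN, loads the working
   registers via bfin_dma_process_desc, and schedules the first event
   callback for memory-write or peripheral channels.  Completion shows up as
   DMA_DONE in IRQ_STATUS, cleared via the write-one-to-clear handling
   (dv_w1c_2) above.  */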

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
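
/* The two DMA buffer handlers above form the slave side of a transfer: they
   run when a peer calls hw_dma_read_buffer / hw_dma_write_buffer against
   this channel (as the master's event callback does), moving data at
   whatever address and count this channel was programmed with.  */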

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};