/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* This must come before any other includes.  */
#include "defs.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"
/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
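
/* Illustrative only: each MMR slot in the layout above is 4 bytes wide, so
   the mmr_name() lookup divides the register offset by 4.  Under that
   layout, mmr_name (0x08) resolves to mmr_names[2], i.e. "CONFIG", and
   mmr_name (0x28) resolves to "IRQ_STATUS"; the "<INV>" entries cover the
   pad words between 16-bit registers.  */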
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}
static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}
static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  /* Use the cached peer when we have one; PERIPHERAL_MAP writes clear it.  */
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be multiple of transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
          /* Fall through to also load NDPL.  */
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  /* A programmed count of 0 is treated as the maximum here.  */
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
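
/* Illustrative sketch only (not part of the model): the register writes a
   target driver might perform for a simple 1D stop-mode transfer that ends
   up in bfin_dma_process_desc() above.  The pDMA_* pointer names stand in
   for one specific channel's MMRs, and the buffer/count values are made up:

       *pDMA_START_ADDR = buf;
       *pDMA_X_COUNT    = 256;
       *pDMA_X_MODIFY   = 4;
       *pDMA_CONFIG     = DMAEN | WDSIZE_32;

   DMAFLOW defaults to stop mode, so ndsize must be 0 here; writing CONFIG
   with DMAEN set is what kicks the channel (see bfin_dma_io_write_buffer
   below).  */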
static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}
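
/* Illustrative arithmetic for the 2D row wrap above (made-up numbers): with
   16-bit elements and X_MODIFY = 2, the transfer loop advances curr_addr by
   2 for every element, including the last one of a row.  If the last element
   of a row sits at address A, curr_addr is A + 2 at that point; the wrap
   then computes A + 2 - X_MODIFY + Y_MODIFY.  With Y_MODIFY = 34 the next
   row therefore starts at A + 34, matching hardware behavior where Y_MODIFY
   is applied from the last element of a row in place of X_MODIFY.  */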
static void bfin_dma_hw_event_callback (struct hw *, void *);
static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);

  if (dma->handler)
    hw_event_queue_deschedule (me, dma->handler);
  dma->handler = NULL;

  if (!delay)
    return;

  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}
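
/* How callers in this file use the delay argument (summarized here for
   clarity): a delay of 0 only cancels any pending event, a delay of 1 keeps
   an active transfer pumping on the next tick, and the larger back-off
   delay is used when the peer stalled and accepted no data.  */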
/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);

  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
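
/* Worked example of the accounting above (made-up numbers): with 32-bit
   elements (ele_size = 4), X_MODIFY = 4 and a peer that accepted ret = 64
   bytes, ele_count is 16, curr_addr advances by 64, and curr_x_count drops
   by 16.  A transfer length that is not a multiple of ele_size is treated
   as an error, while a ret of 0 simply reschedules with the long back-off
   delay.  */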
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}
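
/* Illustrative only: IRQ_STATUS is handled as write-1-to-clear via
   dv_w1c_2() above.  Writing a value with the DMA_DONE bit set clears
   DMA_DONE, and writing DMA_DONE | DMA_ERR clears both sticky status bits;
   DMA_RUN is not part of the clear mask, so MMR writes never change it
   directly.  */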
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}
const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};