// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom

/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager; hence the ring
 * manager driver is implemented as a mailbox controller driver and the
 * offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a large
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a single mailbox channel provided by the Broadcom SoC specific
 * ring manager driver. To have more SBA DMA channels, we can create more
 * SBA device nodes in the Broadcom SoC specific DTS based on the number
 * of hardware rings supported by the Broadcom SoC ring manager.
 */
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"
/* ====== Driver macros and defines ===== */

#define SBA_TYPE_SHIFT					48
#define SBA_TYPE_MASK					GENMASK(1, 0)
#define SBA_TYPE_A					0x0
#define SBA_TYPE_B					0x2
#define SBA_TYPE_C					0x3
#define SBA_USER_DEF_SHIFT				32
#define SBA_USER_DEF_MASK				GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT				24
#define SBA_R_MDATA_MASK				GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT				18
#define SBA_C_MDATA_MS_MASK				GENMASK(1, 0)
#define SBA_INT_SHIFT					17
#define SBA_INT_MASK					BIT(0)
#define SBA_RESP_SHIFT					16
#define SBA_RESP_MASK					BIT(0)
#define SBA_C_MDATA_SHIFT				8
#define SBA_C_MDATA_MASK				GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)			(2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK				GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT				5
#define SBA_C_MDATA_DNUM_MASK				GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v)				((__v) & 0xff)
#define SBA_C_MDATA_MS(__v)				(((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT					0
#define SBA_CMD_MASK					GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER				0x4
#define SBA_CMD_ZERO_ALL_BUFFERS			0x8
#define SBA_CMD_LOAD_BUFFER				0x9
#define SBA_CMD_XOR					0xa
#define SBA_CMD_GALOIS_XOR				0xb
#define SBA_CMD_WRITE_BUFFER				0xc
#define SBA_CMD_GALOIS					0xe

#define SBA_MAX_REQ_PER_MBOX_CHANNEL			8192
#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL		8

/* Driver helper macros */
#define to_sba_request(tx)		\
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)		\
	container_of(dchan, struct sba_device, dma_chan)
/* ===== Driver data structures ===== */

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};

struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[];
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2
};
struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
};
/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}

static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
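
/*
 * Illustrative only, not part of the original driver: a minimal sketch of
 * how the helpers above compose a 64-bit SBA command word. It mirrors the
 * way the sba_fillup_*_msg() routines below build a type-B LOAD_BUFFER
 * command; the function name sba_example_load_cmd is invented for this
 * example and is never called by the driver.
 */
static inline u64 __maybe_unused sba_example_load_cmd(u32 buf_len)
{
	u64 cmd;
	u32 c_mdata;

	/* Select a type-B command and encode the transfer length */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, buf_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);

	/* Target internal buffer 0 via the command metadata field */
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);

	/* Finally select the LOAD_BUFFER opcode */
	return sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			   SBA_CMD_SHIFT, SBA_CMD_MASK);
}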
/* ====== General helper routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests, so peek the mailbox
		 * channel hoping that a few active requests have
		 * completed, which will create room for new requests.
		 */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
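
/*
 * Illustrative only, not part of the original driver: a minimal sketch of
 * the request state machine built from the helpers above. A request moves
 * FREE -> ALLOCED -> PENDING -> ACTIVE and back to FREE, and every
 * transition must happen under sba->reqs_lock. The function name
 * sba_example_submit_one is invented for this example.
 */
static void __maybe_unused sba_example_submit_one(struct sba_device *sba,
						  struct sba_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Queue the request; it leaves the ALLOCED state here */
	_sba_pending_request(sba, req);

	/* Try to make it ACTIVE; a fenced predecessor can refuse this */
	if (!_sba_active_request(sba, req))
		dev_dbg(sba->dev, "request stays pending behind a fence\n");

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}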
static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret = 0;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0) {
		dev_err(sba->dev, "message error %d", ret);
		return ret;
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/* Process a few pending requests */
	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}
static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			spin_lock_irqsave(&sba->reqs_lock, flags);
			dma_cookie_complete(tx);
			spin_unlock_irqrestore(&sba->reqs_lock, flags);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}
static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}
/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can so that we can re-use the pre-allocated
	 * channel resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	mbox_client_peek_data(sba->mchan);

	return dma_cookie_status(dchan, cookie, txstate);
}
555 static void sba_fillup_interrupt_msg(struct sba_request
*req
,
556 struct brcm_sba_command
*cmds
,
557 struct brcm_message
*msg
)
561 dma_addr_t resp_dma
= req
->tx
.phys
;
562 struct brcm_sba_command
*cmdsp
= cmds
;
564 /* Type-B command to load dummy data into buf0 */
565 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
566 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
567 cmd
= sba_cmd_enc(cmd
, req
->sba
->hw_resp_size
,
568 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
569 c_mdata
= sba_cmd_load_c_mdata(0);
570 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
571 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
572 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
573 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
575 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
576 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
577 cmdsp
->data
= resp_dma
;
578 cmdsp
->data_len
= req
->sba
->hw_resp_size
;
581 /* Type-A command to write buf0 to dummy location */
582 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
583 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
584 cmd
= sba_cmd_enc(cmd
, req
->sba
->hw_resp_size
,
585 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
586 cmd
= sba_cmd_enc(cmd
, 0x1,
587 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
588 c_mdata
= sba_cmd_write_c_mdata(0);
589 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
590 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
591 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
592 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
594 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
595 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
596 if (req
->sba
->hw_resp_size
) {
597 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
598 cmdsp
->resp
= resp_dma
;
599 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
601 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
602 cmdsp
->data
= resp_dma
;
603 cmdsp
->data_len
= req
->sba
->hw_resp_size
;
606 /* Fillup brcm_message */
607 msg
->type
= BRCM_MESSAGE_SBA
;
608 msg
->sba
.cmds
= cmds
;
609 msg
->sba
.cmds_count
= cmdsp
- cmds
;
614 static struct dma_async_tx_descriptor
*
615 sba_prep_dma_interrupt(struct dma_chan
*dchan
, unsigned long flags
)
617 struct sba_request
*req
= NULL
;
618 struct sba_device
*sba
= to_sba_device(dchan
);
620 /* Alloc new request */
621 req
= sba_alloc_request(sba
);
626 * Force fence so that no requests are submitted
627 * until DMA callback for this request is invoked.
629 req
->flags
|= SBA_REQUEST_FENCE
;
631 /* Fillup request message */
632 sba_fillup_interrupt_msg(req
, req
->cmds
, &req
->msg
);
634 /* Init async_tx descriptor */
635 req
->tx
.flags
= flags
;
636 req
->tx
.cookie
= -EBUSY
;
641 static void sba_fillup_memcpy_msg(struct sba_request
*req
,
642 struct brcm_sba_command
*cmds
,
643 struct brcm_message
*msg
,
644 dma_addr_t msg_offset
, size_t msg_len
,
645 dma_addr_t dst
, dma_addr_t src
)
649 dma_addr_t resp_dma
= req
->tx
.phys
;
650 struct brcm_sba_command
*cmdsp
= cmds
;
652 /* Type-B command to load data into buf0 */
653 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
654 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
655 cmd
= sba_cmd_enc(cmd
, msg_len
,
656 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
657 c_mdata
= sba_cmd_load_c_mdata(0);
658 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
659 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
660 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
661 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
663 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
664 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
665 cmdsp
->data
= src
+ msg_offset
;
666 cmdsp
->data_len
= msg_len
;
669 /* Type-A command to write buf0 */
670 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
671 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
672 cmd
= sba_cmd_enc(cmd
, msg_len
,
673 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
674 cmd
= sba_cmd_enc(cmd
, 0x1,
675 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
676 c_mdata
= sba_cmd_write_c_mdata(0);
677 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
678 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
679 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
680 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
682 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
683 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
684 if (req
->sba
->hw_resp_size
) {
685 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
686 cmdsp
->resp
= resp_dma
;
687 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
689 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
690 cmdsp
->data
= dst
+ msg_offset
;
691 cmdsp
->data_len
= msg_len
;
694 /* Fillup brcm_message */
695 msg
->type
= BRCM_MESSAGE_SBA
;
696 msg
->sba
.cmds
= cmds
;
697 msg
->sba
.cmds_count
= cmdsp
- cmds
;
702 static struct sba_request
*
703 sba_prep_dma_memcpy_req(struct sba_device
*sba
,
704 dma_addr_t off
, dma_addr_t dst
, dma_addr_t src
,
705 size_t len
, unsigned long flags
)
707 struct sba_request
*req
= NULL
;
709 /* Alloc new request */
710 req
= sba_alloc_request(sba
);
713 if (flags
& DMA_PREP_FENCE
)
714 req
->flags
|= SBA_REQUEST_FENCE
;
716 /* Fillup request message */
717 sba_fillup_memcpy_msg(req
, req
->cmds
, &req
->msg
,
720 /* Init async_tx descriptor */
721 req
->tx
.flags
= flags
;
722 req
->tx
.cookie
= -EBUSY
;
static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
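
/*
 * Illustrative only, not part of the original driver: the loop above splits
 * one copy into hw_buf_size-sized chained sub-requests. For example, with
 * the 4096-byte hw_buf_size used by this driver, a 10000-byte memcpy becomes
 * three chained requests covering 4096 + 4096 + 1808 bytes. The helper below
 * (an invented name, never called by the driver) just computes that count.
 */
static inline size_t __maybe_unused sba_example_num_chunks(struct sba_device *sba,
							   size_t len)
{
	/* Number of chained sub-requests a transfer of 'len' bytes needs */
	return DIV_ROUND_UP(len, sba->hw_buf_size);
}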
760 static void sba_fillup_xor_msg(struct sba_request
*req
,
761 struct brcm_sba_command
*cmds
,
762 struct brcm_message
*msg
,
763 dma_addr_t msg_offset
, size_t msg_len
,
764 dma_addr_t dst
, dma_addr_t
*src
, u32 src_cnt
)
769 dma_addr_t resp_dma
= req
->tx
.phys
;
770 struct brcm_sba_command
*cmdsp
= cmds
;
772 /* Type-B command to load data into buf0 */
773 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
774 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
775 cmd
= sba_cmd_enc(cmd
, msg_len
,
776 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
777 c_mdata
= sba_cmd_load_c_mdata(0);
778 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
779 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
780 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
781 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
783 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
784 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
785 cmdsp
->data
= src
[0] + msg_offset
;
786 cmdsp
->data_len
= msg_len
;
789 /* Type-B commands to xor data with buf0 and put it back in buf0 */
790 for (i
= 1; i
< src_cnt
; i
++) {
791 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
792 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
793 cmd
= sba_cmd_enc(cmd
, msg_len
,
794 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
795 c_mdata
= sba_cmd_xor_c_mdata(0, 0);
796 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
797 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
798 cmd
= sba_cmd_enc(cmd
, SBA_CMD_XOR
,
799 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
801 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
802 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
803 cmdsp
->data
= src
[i
] + msg_offset
;
804 cmdsp
->data_len
= msg_len
;
808 /* Type-A command to write buf0 */
809 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
810 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
811 cmd
= sba_cmd_enc(cmd
, msg_len
,
812 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
813 cmd
= sba_cmd_enc(cmd
, 0x1,
814 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
815 c_mdata
= sba_cmd_write_c_mdata(0);
816 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
817 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
818 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
819 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
821 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
822 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
823 if (req
->sba
->hw_resp_size
) {
824 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
825 cmdsp
->resp
= resp_dma
;
826 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
828 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
829 cmdsp
->data
= dst
+ msg_offset
;
830 cmdsp
->data_len
= msg_len
;
833 /* Fillup brcm_message */
834 msg
->type
= BRCM_MESSAGE_SBA
;
835 msg
->sba
.cmds
= cmds
;
836 msg
->sba
.cmds_count
= cmdsp
- cmds
;
841 static struct sba_request
*
842 sba_prep_dma_xor_req(struct sba_device
*sba
,
843 dma_addr_t off
, dma_addr_t dst
, dma_addr_t
*src
,
844 u32 src_cnt
, size_t len
, unsigned long flags
)
846 struct sba_request
*req
= NULL
;
848 /* Alloc new request */
849 req
= sba_alloc_request(sba
);
852 if (flags
& DMA_PREP_FENCE
)
853 req
->flags
|= SBA_REQUEST_FENCE
;
855 /* Fillup request message */
856 sba_fillup_xor_msg(req
, req
->cmds
, &req
->msg
,
857 off
, len
, dst
, src
, src_cnt
);
859 /* Init async_tx descriptor */
860 req
->tx
.flags
= flags
;
861 req
->tx
.cookie
= -EBUSY
;
866 static struct dma_async_tx_descriptor
*
867 sba_prep_dma_xor(struct dma_chan
*dchan
, dma_addr_t dst
, dma_addr_t
*src
,
868 u32 src_cnt
, size_t len
, unsigned long flags
)
872 struct sba_device
*sba
= to_sba_device(dchan
);
873 struct sba_request
*first
= NULL
, *req
;
876 if (unlikely(src_cnt
> sba
->max_xor_srcs
))
879 /* Create chained requests where each request is upto hw_buf_size */
881 req_len
= (len
< sba
->hw_buf_size
) ? len
: sba
->hw_buf_size
;
883 req
= sba_prep_dma_xor_req(sba
, off
, dst
, src
, src_cnt
,
887 sba_free_chained_requests(first
);
892 sba_chain_request(first
, req
);
900 return (first
) ? &first
->tx
: NULL
;
903 static void sba_fillup_pq_msg(struct sba_request
*req
,
905 struct brcm_sba_command
*cmds
,
906 struct brcm_message
*msg
,
907 dma_addr_t msg_offset
, size_t msg_len
,
908 dma_addr_t
*dst_p
, dma_addr_t
*dst_q
,
909 const u8
*scf
, dma_addr_t
*src
, u32 src_cnt
)
914 dma_addr_t resp_dma
= req
->tx
.phys
;
915 struct brcm_sba_command
*cmdsp
= cmds
;
918 /* Type-B command to load old P into buf0 */
920 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
921 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
922 cmd
= sba_cmd_enc(cmd
, msg_len
,
923 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
924 c_mdata
= sba_cmd_load_c_mdata(0);
925 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
926 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
927 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
928 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
930 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
931 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
932 cmdsp
->data
= *dst_p
+ msg_offset
;
933 cmdsp
->data_len
= msg_len
;
937 /* Type-B command to load old Q into buf1 */
939 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
940 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
941 cmd
= sba_cmd_enc(cmd
, msg_len
,
942 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
943 c_mdata
= sba_cmd_load_c_mdata(1);
944 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
945 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
946 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
947 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
949 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
950 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
951 cmdsp
->data
= *dst_q
+ msg_offset
;
952 cmdsp
->data_len
= msg_len
;
956 /* Type-A command to zero all buffers */
957 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
958 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
959 cmd
= sba_cmd_enc(cmd
, msg_len
,
960 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
961 cmd
= sba_cmd_enc(cmd
, SBA_CMD_ZERO_ALL_BUFFERS
,
962 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
964 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
965 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
969 /* Type-B commands for generate P onto buf0 and Q onto buf1 */
970 for (i
= 0; i
< src_cnt
; i
++) {
971 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
972 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
973 cmd
= sba_cmd_enc(cmd
, msg_len
,
974 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
975 c_mdata
= sba_cmd_pq_c_mdata(raid6_gflog
[scf
[i
]], 1, 0);
976 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
977 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
978 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_MS(c_mdata
),
979 SBA_C_MDATA_MS_SHIFT
, SBA_C_MDATA_MS_MASK
);
980 cmd
= sba_cmd_enc(cmd
, SBA_CMD_GALOIS_XOR
,
981 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
983 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
984 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
985 cmdsp
->data
= src
[i
] + msg_offset
;
986 cmdsp
->data_len
= msg_len
;
990 /* Type-A command to write buf0 */
992 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
993 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
994 cmd
= sba_cmd_enc(cmd
, msg_len
,
995 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
996 cmd
= sba_cmd_enc(cmd
, 0x1,
997 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
998 c_mdata
= sba_cmd_write_c_mdata(0);
999 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1000 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1001 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
1002 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1004 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1005 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1006 if (req
->sba
->hw_resp_size
) {
1007 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
1008 cmdsp
->resp
= resp_dma
;
1009 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
1011 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
1012 cmdsp
->data
= *dst_p
+ msg_offset
;
1013 cmdsp
->data_len
= msg_len
;
1017 /* Type-A command to write buf1 */
1019 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
1020 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1021 cmd
= sba_cmd_enc(cmd
, msg_len
,
1022 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1023 cmd
= sba_cmd_enc(cmd
, 0x1,
1024 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
1025 c_mdata
= sba_cmd_write_c_mdata(1);
1026 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1027 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1028 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
1029 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1031 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1032 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1033 if (req
->sba
->hw_resp_size
) {
1034 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
1035 cmdsp
->resp
= resp_dma
;
1036 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
1038 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
1039 cmdsp
->data
= *dst_q
+ msg_offset
;
1040 cmdsp
->data_len
= msg_len
;
1044 /* Fillup brcm_message */
1045 msg
->type
= BRCM_MESSAGE_SBA
;
1046 msg
->sba
.cmds
= cmds
;
1047 msg
->sba
.cmds_count
= cmdsp
- cmds
;
1052 static struct sba_request
*
1053 sba_prep_dma_pq_req(struct sba_device
*sba
, dma_addr_t off
,
1054 dma_addr_t
*dst_p
, dma_addr_t
*dst_q
, dma_addr_t
*src
,
1055 u32 src_cnt
, const u8
*scf
, size_t len
, unsigned long flags
)
1057 struct sba_request
*req
= NULL
;
1059 /* Alloc new request */
1060 req
= sba_alloc_request(sba
);
1063 if (flags
& DMA_PREP_FENCE
)
1064 req
->flags
|= SBA_REQUEST_FENCE
;
1066 /* Fillup request messages */
1067 sba_fillup_pq_msg(req
, dmaf_continue(flags
),
1068 req
->cmds
, &req
->msg
,
1069 off
, len
, dst_p
, dst_q
, scf
, src
, src_cnt
);
1071 /* Init async_tx descriptor */
1072 req
->tx
.flags
= flags
;
1073 req
->tx
.cookie
= -EBUSY
;
1078 static void sba_fillup_pq_single_msg(struct sba_request
*req
,
1080 struct brcm_sba_command
*cmds
,
1081 struct brcm_message
*msg
,
1082 dma_addr_t msg_offset
, size_t msg_len
,
1083 dma_addr_t
*dst_p
, dma_addr_t
*dst_q
,
1084 dma_addr_t src
, u8 scf
)
1088 u8 pos
, dpos
= raid6_gflog
[scf
];
1089 dma_addr_t resp_dma
= req
->tx
.phys
;
1090 struct brcm_sba_command
*cmdsp
= cmds
;
1096 /* Type-B command to load old P into buf0 */
1097 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
1098 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1099 cmd
= sba_cmd_enc(cmd
, msg_len
,
1100 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1101 c_mdata
= sba_cmd_load_c_mdata(0);
1102 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1103 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1104 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
1105 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1107 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1108 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
1109 cmdsp
->data
= *dst_p
+ msg_offset
;
1110 cmdsp
->data_len
= msg_len
;
1114 * Type-B commands to xor data with buf0 and put it
1117 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
1118 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1119 cmd
= sba_cmd_enc(cmd
, msg_len
,
1120 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1121 c_mdata
= sba_cmd_xor_c_mdata(0, 0);
1122 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1123 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1124 cmd
= sba_cmd_enc(cmd
, SBA_CMD_XOR
,
1125 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1127 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1128 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
1129 cmdsp
->data
= src
+ msg_offset
;
1130 cmdsp
->data_len
= msg_len
;
1133 /* Type-B command to load old P into buf0 */
1134 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
1135 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1136 cmd
= sba_cmd_enc(cmd
, msg_len
,
1137 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1138 c_mdata
= sba_cmd_load_c_mdata(0);
1139 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1140 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1141 cmd
= sba_cmd_enc(cmd
, SBA_CMD_LOAD_BUFFER
,
1142 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1144 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1145 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
1146 cmdsp
->data
= src
+ msg_offset
;
1147 cmdsp
->data_len
= msg_len
;
1151 /* Type-A command to write buf0 */
1152 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
1153 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1154 cmd
= sba_cmd_enc(cmd
, msg_len
,
1155 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1156 cmd
= sba_cmd_enc(cmd
, 0x1,
1157 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
1158 c_mdata
= sba_cmd_write_c_mdata(0);
1159 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1160 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1161 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
1162 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1164 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1165 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1166 if (req
->sba
->hw_resp_size
) {
1167 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
1168 cmdsp
->resp
= resp_dma
;
1169 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
1171 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
1172 cmdsp
->data
= *dst_p
+ msg_offset
;
1173 cmdsp
->data_len
= msg_len
;
1180 /* Type-A command to zero all buffers */
1181 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
1182 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1183 cmd
= sba_cmd_enc(cmd
, msg_len
,
1184 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1185 cmd
= sba_cmd_enc(cmd
, SBA_CMD_ZERO_ALL_BUFFERS
,
1186 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1188 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1189 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1193 goto skip_q_computation
;
1194 pos
= (dpos
< req
->sba
->max_pq_coefs
) ?
1195 dpos
: (req
->sba
->max_pq_coefs
- 1);
1198 * Type-B command to generate initial Q from data
1199 * and store output into buf0
1201 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
1202 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1203 cmd
= sba_cmd_enc(cmd
, msg_len
,
1204 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1205 c_mdata
= sba_cmd_pq_c_mdata(pos
, 0, 0);
1206 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1207 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1208 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_MS(c_mdata
),
1209 SBA_C_MDATA_MS_SHIFT
, SBA_C_MDATA_MS_MASK
);
1210 cmd
= sba_cmd_enc(cmd
, SBA_CMD_GALOIS
,
1211 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1213 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1214 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
1215 cmdsp
->data
= src
+ msg_offset
;
1216 cmdsp
->data_len
= msg_len
;
1221 /* Multiple Type-A command to generate final Q */
1223 pos
= (dpos
< req
->sba
->max_pq_coefs
) ?
1224 dpos
: (req
->sba
->max_pq_coefs
- 1);
1227 * Type-A command to generate Q with buf0 and
1228 * buf1 store result in buf0
1230 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
1231 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1232 cmd
= sba_cmd_enc(cmd
, msg_len
,
1233 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1234 c_mdata
= sba_cmd_pq_c_mdata(pos
, 0, 1);
1235 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1236 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1237 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_MS(c_mdata
),
1238 SBA_C_MDATA_MS_SHIFT
, SBA_C_MDATA_MS_MASK
);
1239 cmd
= sba_cmd_enc(cmd
, SBA_CMD_GALOIS
,
1240 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1242 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1243 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1252 * Type-B command to XOR previous output with
1253 * buf0 and write it into buf0
1255 cmd
= sba_cmd_enc(0x0, SBA_TYPE_B
,
1256 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1257 cmd
= sba_cmd_enc(cmd
, msg_len
,
1258 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1259 c_mdata
= sba_cmd_xor_c_mdata(0, 0);
1260 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1261 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1262 cmd
= sba_cmd_enc(cmd
, SBA_CMD_XOR
,
1263 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1265 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1266 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_B
;
1267 cmdsp
->data
= *dst_q
+ msg_offset
;
1268 cmdsp
->data_len
= msg_len
;
1272 /* Type-A command to write buf0 */
1273 cmd
= sba_cmd_enc(0x0, SBA_TYPE_A
,
1274 SBA_TYPE_SHIFT
, SBA_TYPE_MASK
);
1275 cmd
= sba_cmd_enc(cmd
, msg_len
,
1276 SBA_USER_DEF_SHIFT
, SBA_USER_DEF_MASK
);
1277 cmd
= sba_cmd_enc(cmd
, 0x1,
1278 SBA_RESP_SHIFT
, SBA_RESP_MASK
);
1279 c_mdata
= sba_cmd_write_c_mdata(0);
1280 cmd
= sba_cmd_enc(cmd
, SBA_C_MDATA_LS(c_mdata
),
1281 SBA_C_MDATA_SHIFT
, SBA_C_MDATA_MASK
);
1282 cmd
= sba_cmd_enc(cmd
, SBA_CMD_WRITE_BUFFER
,
1283 SBA_CMD_SHIFT
, SBA_CMD_MASK
);
1285 *cmdsp
->cmd_dma
= cpu_to_le64(cmd
);
1286 cmdsp
->flags
= BRCM_SBA_CMD_TYPE_A
;
1287 if (req
->sba
->hw_resp_size
) {
1288 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_RESP
;
1289 cmdsp
->resp
= resp_dma
;
1290 cmdsp
->resp_len
= req
->sba
->hw_resp_size
;
1292 cmdsp
->flags
|= BRCM_SBA_CMD_HAS_OUTPUT
;
1293 cmdsp
->data
= *dst_q
+ msg_offset
;
1294 cmdsp
->data_len
= msg_len
;
1298 /* Fillup brcm_message */
1299 msg
->type
= BRCM_MESSAGE_SBA
;
1300 msg
->sba
.cmds
= cmds
;
1301 msg
->sba
.cmds_count
= cmdsp
- cmds
;
1306 static struct sba_request
*
1307 sba_prep_dma_pq_single_req(struct sba_device
*sba
, dma_addr_t off
,
1308 dma_addr_t
*dst_p
, dma_addr_t
*dst_q
,
1309 dma_addr_t src
, u8 scf
, size_t len
,
1310 unsigned long flags
)
1312 struct sba_request
*req
= NULL
;
1314 /* Alloc new request */
1315 req
= sba_alloc_request(sba
);
1318 if (flags
& DMA_PREP_FENCE
)
1319 req
->flags
|= SBA_REQUEST_FENCE
;
1321 /* Fillup request messages */
1322 sba_fillup_pq_single_msg(req
, dmaf_continue(flags
),
1323 req
->cmds
, &req
->msg
, off
, len
,
1324 dst_p
, dst_q
, src
, scf
);
1326 /* Init async_tx descriptor */
1327 req
->tx
.flags
= flags
;
1328 req
->tx
.cookie
= -EBUSY
;
1333 static struct dma_async_tx_descriptor
*
1334 sba_prep_dma_pq(struct dma_chan
*dchan
, dma_addr_t
*dst
, dma_addr_t
*src
,
1335 u32 src_cnt
, const u8
*scf
, size_t len
, unsigned long flags
)
1341 dma_addr_t
*dst_p
= NULL
, *dst_q
= NULL
;
1342 struct sba_device
*sba
= to_sba_device(dchan
);
1343 struct sba_request
*first
= NULL
, *req
;
1346 if (unlikely(src_cnt
> sba
->max_pq_srcs
))
1348 for (i
= 0; i
< src_cnt
; i
++)
1349 if (sba
->max_pq_coefs
<= raid6_gflog
[scf
[i
]])
1352 /* Figure-out P and Q destination addresses */
1353 if (!(flags
& DMA_PREP_PQ_DISABLE_P
))
1355 if (!(flags
& DMA_PREP_PQ_DISABLE_Q
))
1358 /* Create chained requests where each request is upto hw_buf_size */
1360 req_len
= (len
< sba
->hw_buf_size
) ? len
: sba
->hw_buf_size
;
1363 dst_q_index
= src_cnt
;
1366 for (i
= 0; i
< src_cnt
; i
++) {
1367 if (*dst_q
== src
[i
]) {
1374 if (dst_q_index
< src_cnt
) {
1376 req
= sba_prep_dma_pq_single_req(sba
,
1377 off
, dst_p
, dst_q
, src
[i
], scf
[i
],
1378 req_len
, flags
| DMA_PREP_FENCE
);
1383 sba_chain_request(first
, req
);
1387 flags
|= DMA_PREP_CONTINUE
;
1390 for (i
= 0; i
< src_cnt
; i
++) {
1391 if (dst_q_index
== i
)
1394 req
= sba_prep_dma_pq_single_req(sba
,
1395 off
, dst_p
, dst_q
, src
[i
], scf
[i
],
1396 req_len
, flags
| DMA_PREP_FENCE
);
1401 sba_chain_request(first
, req
);
1405 flags
|= DMA_PREP_CONTINUE
;
1408 req
= sba_prep_dma_pq_req(sba
, off
,
1409 dst_p
, dst_q
, src
, src_cnt
,
1410 scf
, req_len
, flags
);
1415 sba_chain_request(first
, req
);
1424 return (first
) ? &first
->tx
: NULL
;
1428 sba_free_chained_requests(first
);
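
/*
 * Illustrative only, not part of the original driver: the PQ routines above
 * program the GALOIS/GALOIS_XOR commands with raid6_gflog[scf[i]], i.e. the
 * discrete logarithm of each RAID-6 coefficient in GF(2^8), since the
 * coefficient field of the command metadata is expressed as a log value.
 * The sketch below (invented name, never called by the driver) shows the
 * equivalent software multiply using the lib/raid6 log/exp tables.
 */
static inline u8 __maybe_unused sba_example_gf_mul(u8 coef, u8 data)
{
	/* Multiply in GF(2^8) by adding logarithms, as the hardware does */
	if (!coef || !data)
		return 0;
	return raid6_gfexp[(raid6_gflog[coef] + raid6_gflog[data]) % 255];
}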
/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Report if the message carries an error */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}

/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct sba_device *sba = dev_get_drvdata(file->private);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}

/* ====== Platform driver routines ===== */
1463 static int sba_prealloc_channel_resources(struct sba_device
*sba
)
1466 struct sba_request
*req
= NULL
;
1468 sba
->resp_base
= dma_alloc_coherent(sba
->mbox_dev
,
1469 sba
->max_resp_pool_size
,
1470 &sba
->resp_dma_base
, GFP_KERNEL
);
1471 if (!sba
->resp_base
)
1474 sba
->cmds_base
= dma_alloc_coherent(sba
->mbox_dev
,
1475 sba
->max_cmds_pool_size
,
1476 &sba
->cmds_dma_base
, GFP_KERNEL
);
1477 if (!sba
->cmds_base
) {
1479 goto fail_free_resp_pool
;
1482 spin_lock_init(&sba
->reqs_lock
);
1483 sba
->reqs_fence
= false;
1484 INIT_LIST_HEAD(&sba
->reqs_alloc_list
);
1485 INIT_LIST_HEAD(&sba
->reqs_pending_list
);
1486 INIT_LIST_HEAD(&sba
->reqs_active_list
);
1487 INIT_LIST_HEAD(&sba
->reqs_aborted_list
);
1488 INIT_LIST_HEAD(&sba
->reqs_free_list
);
1490 for (i
= 0; i
< sba
->max_req
; i
++) {
1491 req
= devm_kzalloc(sba
->dev
,
1492 struct_size(req
, cmds
, sba
->max_cmd_per_req
),
1496 goto fail_free_cmds_pool
;
1498 INIT_LIST_HEAD(&req
->node
);
1500 req
->flags
= SBA_REQUEST_STATE_FREE
;
1501 INIT_LIST_HEAD(&req
->next
);
1502 atomic_set(&req
->next_pending_count
, 0);
1503 for (j
= 0; j
< sba
->max_cmd_per_req
; j
++) {
1504 req
->cmds
[j
].cmd
= 0;
1505 req
->cmds
[j
].cmd_dma
= sba
->cmds_base
+
1506 (i
* sba
->max_cmd_per_req
+ j
) * sizeof(u64
);
1507 req
->cmds
[j
].cmd_dma_addr
= sba
->cmds_dma_base
+
1508 (i
* sba
->max_cmd_per_req
+ j
) * sizeof(u64
);
1509 req
->cmds
[j
].flags
= 0;
1511 memset(&req
->msg
, 0, sizeof(req
->msg
));
1512 dma_async_tx_descriptor_init(&req
->tx
, &sba
->dma_chan
);
1513 async_tx_ack(&req
->tx
);
1514 req
->tx
.tx_submit
= sba_tx_submit
;
1515 req
->tx
.phys
= sba
->resp_dma_base
+ i
* sba
->hw_resp_size
;
1516 list_add_tail(&req
->node
, &sba
->reqs_free_list
);
1521 fail_free_cmds_pool
:
1522 dma_free_coherent(sba
->mbox_dev
,
1523 sba
->max_cmds_pool_size
,
1524 sba
->cmds_base
, sba
->cmds_dma_base
);
1525 fail_free_resp_pool
:
1526 dma_free_coherent(sba
->mbox_dev
,
1527 sba
->max_resp_pool_size
,
1528 sba
->resp_base
, sba
->resp_dma_base
);
static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}
1543 static int sba_async_register(struct sba_device
*sba
)
1546 struct dma_device
*dma_dev
= &sba
->dma_dev
;
1548 /* Initialize DMA channel cookie */
1549 sba
->dma_chan
.device
= dma_dev
;
1550 dma_cookie_init(&sba
->dma_chan
);
1552 /* Initialize DMA device capability mask */
1553 dma_cap_zero(dma_dev
->cap_mask
);
1554 dma_cap_set(DMA_INTERRUPT
, dma_dev
->cap_mask
);
1555 dma_cap_set(DMA_MEMCPY
, dma_dev
->cap_mask
);
1556 dma_cap_set(DMA_XOR
, dma_dev
->cap_mask
);
1557 dma_cap_set(DMA_PQ
, dma_dev
->cap_mask
);
1560 * Set mailbox channel device as the base device of
1561 * our dma_device because the actual memory accesses
1562 * will be done by mailbox controller
1564 dma_dev
->dev
= sba
->mbox_dev
;
1566 /* Set base prep routines */
1567 dma_dev
->device_free_chan_resources
= sba_free_chan_resources
;
1568 dma_dev
->device_terminate_all
= sba_device_terminate_all
;
1569 dma_dev
->device_issue_pending
= sba_issue_pending
;
1570 dma_dev
->device_tx_status
= sba_tx_status
;
1572 /* Set interrupt routine */
1573 if (dma_has_cap(DMA_INTERRUPT
, dma_dev
->cap_mask
))
1574 dma_dev
->device_prep_dma_interrupt
= sba_prep_dma_interrupt
;
1576 /* Set memcpy routine */
1577 if (dma_has_cap(DMA_MEMCPY
, dma_dev
->cap_mask
))
1578 dma_dev
->device_prep_dma_memcpy
= sba_prep_dma_memcpy
;
1580 /* Set xor routine and capability */
1581 if (dma_has_cap(DMA_XOR
, dma_dev
->cap_mask
)) {
1582 dma_dev
->device_prep_dma_xor
= sba_prep_dma_xor
;
1583 dma_dev
->max_xor
= sba
->max_xor_srcs
;
1586 /* Set pq routine and capability */
1587 if (dma_has_cap(DMA_PQ
, dma_dev
->cap_mask
)) {
1588 dma_dev
->device_prep_dma_pq
= sba_prep_dma_pq
;
1589 dma_set_maxpq(dma_dev
, sba
->max_pq_srcs
, 0);
1592 /* Initialize DMA device channel list */
1593 INIT_LIST_HEAD(&dma_dev
->channels
);
1594 list_add_tail(&sba
->dma_chan
.device_node
, &dma_dev
->channels
);
1596 /* Register with Linux async DMA framework*/
1597 ret
= dma_async_device_register(dma_dev
);
1599 dev_err(sba
->dev
, "async device register error %d", ret
);
1603 dev_info(sba
->dev
, "%s capabilities: %s%s%s%s\n",
1604 dma_chan_name(&sba
->dma_chan
),
1605 dma_has_cap(DMA_INTERRUPT
, dma_dev
->cap_mask
) ? "interrupt " : "",
1606 dma_has_cap(DMA_MEMCPY
, dma_dev
->cap_mask
) ? "memcpy " : "",
1607 dma_has_cap(DMA_XOR
, dma_dev
->cap_mask
) ? "xor " : "",
1608 dma_has_cap(DMA_PQ
, dma_dev
->cap_mask
) ? "pq " : "");
1613 static int sba_probe(struct platform_device
*pdev
)
1616 struct sba_device
*sba
;
1617 struct platform_device
*mbox_pdev
;
1618 struct of_phandle_args args
;
1620 /* Allocate main SBA struct */
1621 sba
= devm_kzalloc(&pdev
->dev
, sizeof(*sba
), GFP_KERNEL
);
1625 sba
->dev
= &pdev
->dev
;
1626 platform_set_drvdata(pdev
, sba
);
1628 /* Number of mailbox channels should be atleast 1 */
1629 ret
= of_count_phandle_with_args(pdev
->dev
.of_node
,
1630 "mboxes", "#mbox-cells");
1634 /* Determine SBA version from DT compatible string */
1635 if (of_device_is_compatible(sba
->dev
->of_node
, "brcm,iproc-sba"))
1636 sba
->ver
= SBA_VER_1
;
1637 else if (of_device_is_compatible(sba
->dev
->of_node
,
1638 "brcm,iproc-sba-v2"))
1639 sba
->ver
= SBA_VER_2
;
1643 /* Derived Configuration parameters */
1646 sba
->hw_buf_size
= 4096;
1647 sba
->hw_resp_size
= 8;
1648 sba
->max_pq_coefs
= 6;
1649 sba
->max_pq_srcs
= 6;
1652 sba
->hw_buf_size
= 4096;
1653 sba
->hw_resp_size
= 8;
1654 sba
->max_pq_coefs
= 30;
1656 * We can support max_pq_srcs == max_pq_coefs because
1657 * we are limited by number of SBA commands that we can
1658 * fit in one message for underlying ring manager HW.
1660 sba
->max_pq_srcs
= 12;
1665 sba
->max_req
= SBA_MAX_REQ_PER_MBOX_CHANNEL
;
1666 sba
->max_cmd_per_req
= sba
->max_pq_srcs
+ 3;
1667 sba
->max_xor_srcs
= sba
->max_cmd_per_req
- 1;
1668 sba
->max_resp_pool_size
= sba
->max_req
* sba
->hw_resp_size
;
1669 sba
->max_cmds_pool_size
= sba
->max_req
*
1670 sba
->max_cmd_per_req
* sizeof(u64
);
1672 /* Setup mailbox client */
1673 sba
->client
.dev
= &pdev
->dev
;
1674 sba
->client
.rx_callback
= sba_receive_message
;
1675 sba
->client
.tx_block
= false;
1676 sba
->client
.knows_txdone
= true;
1677 sba
->client
.tx_tout
= 0;
1679 /* Request mailbox channel */
1680 sba
->mchan
= mbox_request_channel(&sba
->client
, 0);
1681 if (IS_ERR(sba
->mchan
)) {
1682 ret
= PTR_ERR(sba
->mchan
);
1683 goto fail_free_mchan
;
1686 /* Find-out underlying mailbox device */
1687 ret
= of_parse_phandle_with_args(pdev
->dev
.of_node
,
1688 "mboxes", "#mbox-cells", 0, &args
);
1690 goto fail_free_mchan
;
1691 mbox_pdev
= of_find_device_by_node(args
.np
);
1692 of_node_put(args
.np
);
1695 goto fail_free_mchan
;
1697 sba
->mbox_dev
= &mbox_pdev
->dev
;
1699 /* Prealloc channel resource */
1700 ret
= sba_prealloc_channel_resources(sba
);
1702 goto fail_free_mchan
;
1704 /* Check availability of debugfs */
1705 if (!debugfs_initialized())
1708 /* Create debugfs root entry */
1709 sba
->root
= debugfs_create_dir(dev_name(sba
->dev
), NULL
);
1711 /* Create debugfs stats entry */
1712 debugfs_create_devm_seqfile(sba
->dev
, "stats", sba
->root
,
1713 sba_debugfs_stats_show
);
1717 /* Register DMA device with Linux async framework */
1718 ret
= sba_async_register(sba
);
1720 goto fail_free_resources
;
1722 /* Print device info */
1723 dev_info(sba
->dev
, "%s using SBAv%d mailbox channel from %s",
1724 dma_chan_name(&sba
->dma_chan
), sba
->ver
+1,
1725 dev_name(sba
->mbox_dev
));
1729 fail_free_resources
:
1730 debugfs_remove_recursive(sba
->root
);
1731 sba_freeup_channel_resources(sba
);
1733 mbox_free_channel(sba
->mchan
);
static void sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove_new = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");