drivers/dma/mv_xor_v2.c

/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF                     0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF                     0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF                     0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF                     0x00C
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK          0x7FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT         0
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK         0x1FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT        16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF                   0x010
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK            0x3F3F
#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE       0x202
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE             0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF                     0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF                     0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK                  0x7FFF
#define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT                 0x0
#define   MV_XOR_V2_DMA_IMSG_TIMER_EN                   BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF                   0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF                    0x04C
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK           0xFFFF
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT          16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF                     0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF                     0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF                     0x100
#define   MV_XOR_V2_DMA_DESQ_CTRL_32B                   1
#define   MV_XOR_V2_DMA_DESQ_CTRL_128B                  7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF                     0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF                  0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF                      0x808
#define MV_XOR_V2_DMA_IMSG_TMOT                         0x810
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK            0x1FFF
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT           0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL                          0x4
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT      0
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL        64
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT      8
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL        8
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT     12
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL       4
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT     16
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL       4
#define MV_XOR_V2_GLOB_PAUSE                            0x014
#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL         0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE                    0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK                     0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE                    0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK                     0x224

#define MV_XOR_V2_MIN_DESC_SIZE                         32
#define MV_XOR_V2_EXT_DESC_SIZE                         128

#define MV_XOR_V2_DESC_RESERVED_SIZE                    12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE                 12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF                8

/*
 * Descriptors queue size. With 32 bytes descriptors, up to 2^14
 * descriptors are allowed, with 128 bytes descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128 bytes descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM                              1024

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD                        0x14
#define MV_XOR_V2_TIMER_THRD                            0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: amount of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
        u16 desc_id;
        u16 flags;
        u32 crc32_result;
        u32 desc_ctrl;

        /* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT     22
#define DESC_OP_MODE_SHIFT              28
#define DESC_OP_MODE_NOP                0       /* Idle operation */
#define DESC_OP_MODE_MEMCPY             1       /* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET             2       /* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT            3       /* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE        4       /* Mem-Compare operation */
#define DESC_OP_MODE_CRC32              5       /* CRC32 calculation */
#define DESC_OP_MODE_XOR                6       /* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6              7       /* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC          8       /* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE            BIT(16)
#define DESC_P_BUFFER_ENABLE            BIT(17)
#define DESC_IOD                        BIT(27)

        u32 buff_size;
        u32 fill_pattern_src_addr[4];
        u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
        u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};

/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the XOR engine clock
 * @irq_tasklet: tasklet that runs descriptor completion processing
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: index of the next free slot in the HW descriptors queue
 */
struct mv_xor_v2_device {
        spinlock_t lock;
        void __iomem *dma_base;
        void __iomem *glob_base;
        struct clk *clk;
        struct tasklet_struct irq_tasklet;
        struct list_head free_sw_desc;
        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct mv_xor_v2_descriptor *hw_desq_virt;
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
        unsigned int hw_queue_idx;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
        int idx;
        struct dma_async_tx_descriptor async_tx;
        struct mv_xor_v2_descriptor hw_desc;
        struct list_head free_list;
};

/*
 * Fill the data buffers to a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
                                       struct mv_xor_v2_descriptor *desc,
                                       dma_addr_t src, int index)
{
        int arr_index = ((index >> 1) * 3);

        /*
         * Fill the buffer's addresses to the descriptor.
         *
         * The format of the buffers address for 2 sequential buffers
         * X and X + 1:
         *
         *  First word:  Buffer-DX-Address-Low[31:0]
         *  Second word: Buffer-DX+1-Address-Low[31:0]
         *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
         *               DX-Buffer-Address-High[47:32] [15:0]
         */
        if ((index & 0x1) == 0) {
                desc->data_buff_addr[arr_index] = lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
                desc->data_buff_addr[arr_index + 2] |=
                        upper_32_bits(src) & 0xFFFF;
        } else {
                desc->data_buff_addr[arr_index + 1] =
                        lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
                desc->data_buff_addr[arr_index + 2] |=
                        (upper_32_bits(src) & 0xFFFF) << 16;
        }
}

/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
                                       int num_of_desc)
{
        /* write the number of new descriptors in the DESQ. */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
                                          int num_of_desc)
{
        /* write the number of descriptors to deallocate from the DESQ */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
        writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

        return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* Configure threshold of number of descriptors, and enable timer */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
        reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
        reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
        reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

        /* Configure Timer Threshold */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
        reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
                MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
        reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}

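/*
 * MSI interrupt handler: check whether any descriptors have completed
 * and, if so, defer the completion processing to the tasklet.
 */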
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
        struct mv_xor_v2_device *xor_dev = data;
        unsigned int ndescs;
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

        /* No descriptors to process */
        if (!ndescs)
                return IRQ_NONE;

        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);

        return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
                container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
        struct mv_xor_v2_device *xor_dev =
                container_of(tx->chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        /* assign cookie */
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);

        /* copy the HW descriptor from the SW descriptor to the DESQ */
        dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

        xor_dev->npendings++;
        xor_dev->hw_queue_idx++;
        if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
                xor_dev->hw_queue_idx = 0;

        spin_unlock_bh(&xor_dev->lock);

        return cookie;
}

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        bool found = false;

        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);

        if (list_empty(&xor_dev->free_sw_desc)) {
                spin_unlock_bh(&xor_dev->lock);
                /* schedule tasklet to free some descriptors */
                tasklet_schedule(&xor_dev->irq_tasklet);
                return NULL;
        }

        list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
                if (async_tx_test_ack(&sw_desc->async_tx)) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                spin_unlock_bh(&xor_dev->lock);
                return NULL;
        }

        list_del(&sw_desc->free_list);

        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);

        return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                          dma_addr_t src, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev;

        xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s len: %zu src %pad dest %pad flags: %ld\n",
                __func__, len, &src, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the MEMCPY control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set source address */
        hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
        hw_descriptor->fill_pattern_src_addr[1] =
                upper_32_bits(src) & 0xFFFF;

        /* Set Destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffers size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                       unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);
        int i;

        if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
                return NULL;

        dev_dbg(xor_dev->dmadev.dev,
                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
                __func__, src_cnt, len, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the XOR control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set the data buffers */
        for (i = 0; i < src_cnt; i++)
                mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

        hw_descriptor->desc_ctrl |=
                src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

        /* Set Destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffers size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the INTERRUPT control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_IOD;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        spin_lock_bh(&xor_dev->lock);

        /*
         * update the engine with the number of descriptors to
         * process
         */
        mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
        xor_dev->npendings = 0;

        spin_unlock_bh(&xor_dev->lock);
}

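/*
 * Read the DESQ-done register and return the number of completed
 * descriptors; the index of the first one is returned via pending_ptr.
 */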
static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
                                 int *pending_ptr)
{
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        /* get the next pending descriptor index */
        *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
                        MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

        /* get the number of descriptors pending handle */
        return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

        /* loop over the completed descriptors */
        for (i = 0; i < num_of_pending; i++) {
                struct mv_xor_v2_descriptor *next_pending_hw_desc =
                        xor_dev->hw_desq_virt + pending_ptr;

                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
                        &xor_dev->sw_desq[next_pending_hw_desc->desc_id];

                /* call the callback */
                if (next_pending_sw_desc->async_tx.cookie > 0) {
                        /*
                         * update the channel's completed cookie - no
                         * lock is required, the IMSG threshold provides
                         * the serialization
                         */
                        dma_cookie_complete(&next_pending_sw_desc->async_tx);

                        if (next_pending_sw_desc->async_tx.callback)
                                next_pending_sw_desc->async_tx.callback(
                                next_pending_sw_desc->async_tx.callback_param);

                        dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
                }

                dma_run_dependencies(&next_pending_sw_desc->async_tx);

                /* Lock the channel */
                spin_lock_bh(&xor_dev->lock);

                /* add the SW descriptor to the free descriptors list */
                list_add(&next_pending_sw_desc->free_list,
                         &xor_dev->free_sw_desc);

                /* Release the channel */
                spin_unlock_bh(&xor_dev->lock);

                /* increment the next descriptor */
                pending_ptr++;
                if (pending_ptr >= MV_XOR_V2_DESC_NUM)
                        pending_ptr = 0;
        }

        if (num_of_pending != 0) {
                /* free the descriptors */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

        writel(msg->address_lo,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
        writel(msg->address_hi & 0xFFFF,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
        writel(msg->data,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}

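/*
 * Initialize the descriptors queue: program its size and base address,
 * set the AXI read/write attributes and bandwidth control, then start
 * the engine.
 */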
static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* write the DESQ size to the DMA engine */
        writel(MV_XOR_V2_DESC_NUM,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

        /* write the DESQ address to the DMA engine */
        writel(xor_dev->hw_desq & 0xFFFFFFFF,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
         * & descriptors to:
         *
         *  - OuterShareable - Snoops will be performed on CPU caches
         *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
         *    and Allocate
         */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

        /* BW CTRL - set values to optimize the XOR performance:
         *
         *  - Set WrBurstLen & RdBurstLen - the unit will issue
         *    maximum of 256B write/read transactions.
         *  - Limit the number of outstanding write & read data
         *    (OBB/IBB) requests to the maximal value.
         */
        reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

        /* Disable the AXI timer feature */
        reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

        /* enable the DMA engine */
        writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

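/*
 * Suspend/resume support: the engine is stopped on suspend and its
 * configuration is fully reprogrammed on resume.
 */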
static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        /* Set the stop bit to disable the XOR unit */
        writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        mv_xor_v2_set_desc_size(xor_dev);
        mv_xor_v2_enable_imsg_thrd(xor_dev);
        mv_xor_v2_descq_init(xor_dev);

        return 0;
}

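/*
 * Probe: map the DMA and global register windows, allocate one MSI for
 * completion interrupts, allocate the HW and SW descriptor queues, and
 * register a single-channel dmaengine device providing MEMCPY, XOR and
 * INTERRUPT capabilities.
 */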
static int mv_xor_v2_probe(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev;
        struct resource *res;
        int i, ret = 0;
        struct dma_device *dma_dev;
        struct mv_xor_v2_sw_desc *sw_desc;
        struct msi_desc *msi_desc;

        BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
                     MV_XOR_V2_EXT_DESC_SIZE);

        xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
        if (!xor_dev)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xor_dev->dma_base))
                return PTR_ERR(xor_dev->dma_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xor_dev->glob_base))
                return PTR_ERR(xor_dev->glob_base);

        platform_set_drvdata(pdev, xor_dev);

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
        if (ret)
                return ret;

        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (!IS_ERR(xor_dev->clk)) {
                ret = clk_prepare_enable(xor_dev->clk);
                if (ret)
                        return ret;
        }

        ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
                                             mv_xor_v2_set_msi_msg);
        if (ret)
                goto disable_clk;

        msi_desc = first_msi_entry(&pdev->dev);
        if (!msi_desc) {
                ret = -ENODEV;
                goto free_msi_irqs;
        }

        ret = devm_request_irq(&pdev->dev, msi_desc->irq,
                               mv_xor_v2_interrupt_handler, 0,
                               dev_name(&pdev->dev), xor_dev);
        if (ret)
                goto free_msi_irqs;

        tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
                     (unsigned long) xor_dev);

        xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

        dma_cookie_init(&xor_dev->dmachan);

        /*
         * allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        xor_dev->hw_desq_virt =
                dma_alloc_coherent(&pdev->dev,
                                   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                                   &xor_dev->hw_desq, GFP_KERNEL);
        if (!xor_dev->hw_desq_virt) {
                ret = -ENOMEM;
                goto free_msi_irqs;
        }

        /* alloc memory for the SW descriptors */
        xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
                                        MV_XOR_V2_DESC_NUM, GFP_KERNEL);
        if (!xor_dev->sw_desq) {
                ret = -ENOMEM;
                goto free_hw_desq;
        }

        spin_lock_init(&xor_dev->lock);

        /* init the free SW descriptors list */
        INIT_LIST_HEAD(&xor_dev->free_sw_desc);

        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
                struct mv_xor_v2_sw_desc *sw_desc =
                        xor_dev->sw_desq + i;
                sw_desc->idx = i;
                dma_async_tx_descriptor_init(&sw_desc->async_tx,
                                             &xor_dev->dmachan);
                sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
                async_tx_ack(&sw_desc->async_tx);

                list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }

        dma_dev = &xor_dev->dmadev;

        /* set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_XOR, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /* init dma link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
        dma_dev->max_xor = 8;
        dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

        xor_dev->dmachan.device = dma_dev;

        list_add_tail(&xor_dev->dmachan.device_node,
                      &dma_dev->channels);

        mv_xor_v2_enable_imsg_thrd(xor_dev);

        mv_xor_v2_descq_init(xor_dev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto free_hw_desq;

        dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

        return 0;

free_hw_desq:
        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
        platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
        if (!IS_ERR(xor_dev->clk))
                clk_disable_unprepare(xor_dev->clk);
        return ret;
}

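/*
 * Remove: unregister the dmaengine device and release the descriptor
 * queue, MSI and clock resources.
 */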
static int mv_xor_v2_remove(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&xor_dev->dmadev);

        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);

        platform_msi_domain_free_irqs(&pdev->dev);

        clk_disable_unprepare(xor_dev->clk);

        return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
        { .compatible = "marvell,xor-v2", },
        {},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
        .probe          = mv_xor_v2_probe,
        .suspend        = mv_xor_v2_suspend,
        .resume         = mv_xor_v2_resume,
        .remove         = mv_xor_v2_remove,
        .driver         = {
                .name   = "mv_xor_v2",
                .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
        },
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");