/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
36 #include "bcm_osal.h"
37 #include "ecore_hsi_common.h"
38 #include "ecore_status.h"
39 #include "ecore.h"
40 #include "ecore_hw.h"
41 #include "reg_addr.h"
42 #include "ecore_utils.h"
43 #include "ecore_iov_api.h"
#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif
#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET	(OSAL_CPU_TO_LE32(-1))
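
/*
 * A PTT (PF Translation Table) entry maps one fixed-size window in the
 * PF's external BAR onto an arbitrary internal (GRC) address, so the
 * driver can reach registers beyond the directly mapped range. The
 * entries are pooled per hwfn and handed out via ecore_ptt_acquire().
 */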
struct ecore_ptt {
	osal_list_entry_t	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};
struct ecore_ptt_pool {
	osal_list_t		free_list;
	osal_spinlock_t		lock;	/* ptt synchronized access */
	struct ecore_ptt	ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
						   GFP_KERNEL,
						   sizeof(*p_pool));
	int i;

	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

		/* There are special PTT entries that are taken only by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);

	return ECORE_SUCCESS;
}
void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
	}
}
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifndef __EXTRACT__LINUX__
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}
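
/*
 * Typical caller pattern (a sketch, not taken from this file): acquire
 * a PTT, use it for register access, then release it back to the pool.
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt != OSAL_NULL) {
 *		val = ecore_rd(p_hwfn, p_ptt, reg_addr);
 *		ecore_ptt_release(p_hwfn, p_ptt);
 *	}
 */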
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);

		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
						      struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	DP_NOTICE(p_hwfn, true,
		  "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */
	/* TODO - add some pretend sanity checks, to make sure pretend
	 * isn't set on this ptt.
	 */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	/* The HW is using DWORDs and we need to translate it to bytes */
	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}
static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDs and the address is in bytes */
	p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset),
	       OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn, true,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
					 enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn, true,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return OSAL_NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}
static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	bool is_empty = true;
	u32 bar_addr;

	if (!p_hwfn->p_dev->chk_reg_fifo)
		goto out;

	/* ecore_rd() cannot be used here since it calls this function */
	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
	is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

out:
	return is_empty;
}
void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
	      u32 val)
{
	bool prev_fifo_err;
	u32 bar_addr;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_wr(0x%x, 0x%x)\n",
		  hw_addr, val);
}
u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	bool prev_fifo_err;
	u32 bar_addr, val;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
		  hw_addr);

	return val;
}
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr,
			    osal_size_t n,
			    bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}
void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       void *dest, u32 hw_addr, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u32 hw_addr, void *src, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
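
/*
 * "Pretend" makes subsequent GRC accesses through this PTT appear to
 * originate from another function (FID) or port - e.g. a PF accessing
 * per-VF registers on behalf of one of its VFs. Pretending to the
 * hwfn's own FID restores normal operation.
 */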
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
#if 0
/* Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
#define HW_LOCK_MAX_RETRIES 1000
enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u8 resource,
				   bool block)
{
	u32 cnt, lock_status, hw_lock_cntr_reg;
	enum _ecore_status_t ecore_status;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is not already taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (lock_status & resource) {
		DP_NOTICE(p_hwfn, true,
			  "Resource already locked: lock_status=0x%x resource=0x%x\n",
			  lock_status, resource);

		return ECORE_BUSY;
	}

	/* Register for the lock */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
		lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

		if (lock_status & resource)
			return ECORE_SUCCESS;

		if (!block) {
			ecore_status = ECORE_BUSY;
			break;
		}

		OSAL_MSLEEP(5);
	}

	if (cnt == HW_LOCK_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
			  resource);
		ecore_status = ECORE_TIMEOUT;
	}

	/* Clear the pending request */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);

	return ecore_status;
}

enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u8 resource)
{
	u32 lock_status, hw_lock_cntr_reg;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is currently taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (!(lock_status & resource)) {
		DP_NOTICE(p_hwfn, true,
			  "resource 0x%x was not taken (lock status 0x%x)\n",
			  resource, lock_status);

		return ECORE_NODEV;
	}

	/* clear lock for resource */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
	return ECORE_SUCCESS;
}
#endif /* HW locks logic */
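
/* DMAE
 * ====
 * The DMAE engine copies data between host memory and the device's
 * internal (GRC) address space. Each hwfn owns one command channel: a
 * command is assembled in host memory, copied into the engine's command
 * memory, and started via the per-channel GO register. The engine
 * signals completion by writing DMAE_COMPLETION_VAL to a completion
 * word in host memory, which the driver polls.
 */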
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
			      const u8 is_src_type_grc,
			      const u8 is_dst_type_grc,
			      struct ecore_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0 - The source is the PCIe
	 * 1 - The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		  DMAE_CMD_SRC_PF_ID_SHIFT;

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		  DMAE_CMD_DST_PF_ID_SHIFT;

	/* DMAE_E4_TODO need to check which value to specify here. */
	/* opcode |= (!b_complete_to_host) << DMAE_CMD_C_DST_SHIFT; */

	/* Whether to write a completion word to the completion destination:
	 * 0 - Do not write a completion word
	 * 1 - Write the completion word
	 */
	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

	/* Swapping mode 3 - big endian. There should be a define ifdef'ed
	 * for this in the HSI somewhere.
	 */
	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;

	/* reset source address in next go */
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	/* reset dest address in next go */
	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK <<
		  DMAE_CMD_DST_ADDR_RESET_SHIFT;

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
	} else {
		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
			     DMAE_CMD_SRC_VF_ID_SHIFT);
	}

	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK <<
			    DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}
static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);

	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	/* verify address is not OSAL_NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  OSAL_LE32_TO_CPU(p_command->opcode),
			  OSAL_LE16_TO_CPU(p_command->opcode_b),
			  OSAL_LE16_TO_CPU(p_command->length_dw),
			  OSAL_LE32_TO_CPU(p_command->src_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->src_addr_lo),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   OSAL_LE32_TO_CPU(p_command->opcode),
		   OSAL_LE16_TO_CPU(p_command->opcode_b),
		   OSAL_LE16_TO_CPU(p_command->length_dw),
		   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The number of commands has been increased to 16 (previously 14).
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers (which are read
	 * only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd),
		 DMAE_GO_VALUE);

	return ecore_status;
}
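
/*
 * Per-hwfn DMAE state lives in three coherent DMA allocations: the
 * completion word the engine writes when a command finishes, the
 * command descriptor itself, and an intermediate buffer used when a
 * caller passes host *virtual* addresses that the engine cannot DMA
 * to or from directly.
 */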
enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
	if (*p_comp == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					 sizeof(struct dmae_cmd));
	if (*p_cmd == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					  sizeof(u32) * DMAE_MAX_RW_SIZE);
	if (*p_buff == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return ECORE_SUCCESS;
err:
	ecore_dmae_info_free(p_hwfn);
	return ECORE_NOMEM;
}
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}
static enum _ecore_status_t
ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
		      ECORE_EMUL_FACTOR :
		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
		       ECORE_FPGA_FACTOR : 1));

	wait_cnt_limit *= factor;
#endif

	/* DMAE_E4_TODO: check whether any function other than BARRIER is
	 * needed to sync the completion_word, since we are not using the
	 * volatile keyword for it.
	 */
	OSAL_BARRIER(p_hwfn->p_dev);
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			ecore_status = ECORE_TIMEOUT;
			break;
		}

		/* To sync the completion_word, since we are not using the
		 * volatile keyword for p_completion_word.
		 */
		OSAL_BARRIER(p_hwfn->p_dev);
	}

	if (ecore_status == ECORE_SUCCESS)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return ecore_status;
}
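
/*
 * A sub-operation handles at most DMAE_MAX_RW_SIZE dwords: it programs
 * the source/destination addresses (staging through the intermediate
 * buffer for host-virtual addresses), posts the command, and waits for
 * the completion word.
 */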
static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
							     struct ecore_ptt *p_ptt,
							     u64 src_addr,
							     u64 dst_addr,
							     u8 src_type,
							     u8 dst_type,
							     u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	switch (src_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
			    (void *)(osal_uintptr_t)src_addr,
			    length_dw * sizeof(u32));
		break;
	default:
		return ECORE_INVAL;
	}

	switch (dst_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		break;
	default:
		return ECORE_INVAL;
	}

	cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);

#ifndef __EXTRACT__LINUX__
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), false);
#endif

	ecore_dmae_post_command(p_hwfn, p_ptt);

	ecore_status = ecore_dmae_operation_wait(p_hwfn);

#ifndef __EXTRACT__LINUX__
	/* TODO - is it true ? */
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), true);
#endif

	if (ecore_status != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, ECORE_MSG_HW,
			  "Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
			  src_addr, dst_addr, length_dw,
			  (u64)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
		return ecore_status;
	}

	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
			    length_dw * sizeof(u32));

	return ECORE_SUCCESS;
}
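
/*
 * Requests larger than DMAE_MAX_RW_SIZE dwords are split into
 * consecutive sub-operations below. GRC addresses advance in dwords
 * while host addresses advance in bytes, hence the offset * 4 on the
 * PCIe side.
 */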
static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       u64 src_addr, u64 dst_addr,
						       u8 src_type, u8 dst_type,
						       u32 size_in_dwords,
						       struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 offset = 0;

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   src_addr, src_type, dst_addr, dst_type,
			   size_in_dwords);
		/* Return success to let the flow complete successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC),
			  p_params);

	cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
	cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
	cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  ecore_status, src_addr, dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}
enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u64 source_addr,
					 u32 grc_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					grc_addr_in_dw,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					ECORE_DMAE_ADDRESS_GRC,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}
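
/*
 * Usage sketch (a hypothetical caller; 'buf' and 'grc_dest' are made-up
 * names): copy 16 dwords from a host buffer into device address space.
 *
 *	u32 buf[16];
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
 *				 (u64)(osal_uintptr_t)buf, grc_dest,
 *				 16, 0);
 */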
enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 grc_addr,
					 dma_addr_t dest_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&(p_hwfn->dmae_info.mutex));

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
					dest_addr, ECORE_DMAE_ADDRESS_GRC,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&(p_hwfn->dmae_info.mutex));

	return rc;
}
enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  dma_addr_t source_addr,
					  dma_addr_t dest_addr,
					  u32 size_in_dwords,
					  struct ecore_dmae_params *p_params)
{
	enum _ecore_status_t rc;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					dest_addr,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					size_in_dwords,
					p_params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
			 enum ecore_hw_err_type err_type)
{
	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}