/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100
/* Portal register assists */

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14
/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940
/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrade performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
 * the portal registers as volatile
 */
/* Cache-enabled ring access */
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
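/*
 * Each ring entry occupies one 64-byte cacheline, so (idx << 6) turns a ring
 * index into a byte offset; e.g. qm_cl(dqrr->ring, 3) addresses the fourth
 * DQRR entry, 192 bytes into the cache-enabled DQRR region.
 */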
/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *     dmode == h/w dequeue mode.
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};

enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};

enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};

enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};

enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};

enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
/* --- Portal structures --- */

#define QM_EQCR_SIZE	8
#define QM_DQRR_SIZE	16
#define QM_MR_SIZE	8
/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb;		/* writes to this are non-coherent */
	u8 dca;
	u16 seqnum;
	u32 orp;		/* 24-bit */
	u32 fqid;		/* 24-bit */
	u32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};
struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};
struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
/* MC (Management Command) command */
struct qm_mcc_queryfq {
	u32 fqid;	/* 24-bit */
};

/* "Alter FQ State Commands" */
struct qm_mcc_alterfq {
	u32 fqid;	/* 24-bit */
	u8 count;	/* number of consecutive FQID */
	u32 context_b;	/* frame queue context b */
};

struct qm_mcc_querycgr {
	u8 cgid;
};

struct qm_mcc_querywq {
	/* select channel if verb != QUERYWQ_DEDICATED */
	u16 channel_wq;	/* ignores wq (3 lsbits): _res[0-2] */
};
#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	u8 _ncw_verb;	/* writes to this are non-coherent */
	struct qm_mcc_initfq initfq;
	struct qm_mcc_queryfq queryfq;
	struct qm_mcc_alterfq alterfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_querycgr querycgr;
	struct qm_mcc_querywq querywq;
	struct qm_mcc_queryfq_np queryfq_np;
};
/* MC (Management Command) result */
struct qm_mcr_queryfq {
	struct qm_fqd fqd;	/* the FQD fields are here */
};

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 fqs;		/* Frame Queue Status */
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};
struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};
struct qm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	__raw_writel(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ce + offset);
}
/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}
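/*
 * Worked example (assuming 64-byte EQCR entries, as implied by EQCR_SHIFT):
 * EQCR_CARRY is QM_EQCR_SIZE << EQCR_SHIFT == 8 << 6 == 0x200. A cursor
 * incremented one entry past the end of the ring differs from an in-ring
 * address only in that carry bit, so masking it off wraps the pointer back
 * to the start of the ring without any modulo or compare.
 */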
/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}
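/*
 * Producer side of the valid-bit convention: each committed entry's verb is
 * stamped with the current 'vbit', and the bit flips every time the cursor
 * wraps, so the consumer can distinguish a freshly written entry from a
 * stale one left over from the previous pass around the ring.
 */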
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
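/*
 * Note the "- 1" above: one EQCR slot is deliberately left unused so that a
 * completely full ring cannot be confused with an empty one (both would
 * otherwise have PI == CI). For example, with ci == 2 and pi == 6,
 * dpaa_cyc_diff(8, 2, 6) == 4 entries are in flight and 3 remain available.
 */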
static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
							   *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
							*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
	DPAA_ASSERT(eqcr->available >= 1);
}
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
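/*
 * Ordering note: the command body is written before the verb byte, and an
 * entry only becomes valid once the verb's valid-bit matches the ring's
 * current polarity, so flushing the verb write publishes the whole enqueue
 * command atomically from the consumer's point of view.
 */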
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}
static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}
/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}
static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}
static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
	       ((mf & (QM_DQRR_SIZE - 1)) << 20));
}
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) |	/* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}
static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}
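/*
 * Consumer side of the valid-bit convention: the entry at 'pi' is new only
 * while its valid-bit matches dqrr->vbit; once pi wraps to 0 the expected
 * polarity flips, so entries left over from the previous lap never match.
 */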
static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}
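/*
 * DCA (Discrete Consumption Acknowledgment) lets software acknowledge DQRR
 * entries out of order: the 1ptr form above acknowledges a single index
 * (optionally parking the FQ), while the bitmask form acknowledges any
 * subset of the 16 ring entries in a single cache-inhibited write.
 */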
static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}
/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)
static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}
static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}
static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}
static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}
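/*
 * Responses ping-pong between the two result registers (QM_CL_RR0/RR1):
 * 'rridx' selects where the next completion will land and is toggled,
 * together with the command valid-bit, after every successfully read
 * result, mirroring the alternation performed by the hardware.
 */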
static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];
static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}
static struct workqueue_struct *qm_portal_wq;

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}
static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}
/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
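/*
 * The 'tag' stored in a frame queue's contextB field is only 32 bits wide,
 * so on 64-bit kernels it holds a lookup-table index (a raw pointer would
 * not fit) while on 32-bit kernels it holds the qman_fq pointer itself;
 * tag_to_fq()/fq_to_tag() are the only sanctioned conversions.
 */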
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = jiffies;

		do {
			now = jiffies;
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
fail_irq:
	free_irq(c->irq, portal);
fail_cgrs:
	kfree(portal->cgrs);
fail_mc:
	qm_mc_finish(p);
fail_mr:
	qm_mr_finish(p);
fail_dqrr:
	qm_dqrr_finish(p);
fail_eqcr:
	qm_eqcr_finish(p);
	return -EIO;
}
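/*
 * The failure labels above unwind in exact reverse order of the successful
 * init steps, the usual kernel goto-unwind idiom: each label releases only
 * resources that were certainly acquired before a jump to it was possible.
 */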
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}
static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}
1330 static inline void fq_state_change(struct qman_portal
*p
, struct qman_fq
*fq
,
1331 const union qm_mr_entry
*msg
, u8 verb
)
1334 case QM_MR_VERB_FQRL
:
1335 DPAA_ASSERT(fq_isset(fq
, QMAN_FQ_STATE_ORL
));
1336 fq_clear(fq
, QMAN_FQ_STATE_ORL
);
1338 case QM_MR_VERB_FQRN
:
1339 DPAA_ASSERT(fq
->state
== qman_fq_state_parked
||
1340 fq
->state
== qman_fq_state_sched
);
1341 DPAA_ASSERT(fq_isset(fq
, QMAN_FQ_STATE_CHANGING
));
1342 fq_clear(fq
, QMAN_FQ_STATE_CHANGING
);
1343 if (msg
->fq
.fqs
& QM_MR_FQS_NOTEMPTY
)
1344 fq_set(fq
, QMAN_FQ_STATE_NE
);
1345 if (msg
->fq
.fqs
& QM_MR_FQS_ORLPRESENT
)
1346 fq_set(fq
, QMAN_FQ_STATE_ORL
);
1347 fq
->state
= qman_fq_state_retired
;
1349 case QM_MR_VERB_FQPN
:
1350 DPAA_ASSERT(fq
->state
== qman_fq_state_sched
);
1351 DPAA_ASSERT(fq_isclear(fq
, QMAN_FQ_STATE_CHANGING
));
1352 fq
->state
= qman_fq_state_parked
;
static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
}
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(msg->fq.fqid);
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(msg->fq.contextB);
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(msg->ern.tag);
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	preempt_enable();
}
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe. Because;
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust contextB as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: contextB points to the FQ */
			fq = tag_to_fq(dq->contextB);
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	clear_bits(bits, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
/* Frame queue API */

static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);
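/*
 * Note the lookup-table layout established above: each FQID owns two slots,
 * fqid * 2 for the full-service object and fqid * 2 + 1 for an enqueue-only
 * (QMAN_FQ_FLAG_NO_MODIFY) reference, which is why fqid_to_fq() only ever
 * consults the even slot.
 */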
void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		 ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	mcc->initfq.fqid = fq->fqid;
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
		mcc->initfq.fqd.context_b = fq_to_tag(fq);
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		    qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			msg.fq.fqid = fq->fqid;
			msg.fq.contextB = fq_to_tag(fq);
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
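/*
 * Taken together, the wrappers above walk the FQ state machine enforced by
 * the management-command interface: oos/parked -(INITFQ)-> parked/sched
 * -(ALTER_RETIRE)-> retired -(ALTER_OOS)-> oos. Each wrapper rejects calls
 * made from states in which its command cannot legally be issued.
 */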
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
static int qman_query_fq_np(struct qman_fq *fq,
			    struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->querycgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);
/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}
static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}
static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;

	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
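
/*
 * Illustration only: composing a VDQCR for qman_volatile_dequeue(). The FQID
 * field must be left zero (the API fills it in from fq->fqid); the caller
 * supplies only the options, e.g. a frame count:
 *
 *	u32 vdqcr = QM_VDQCR_NUMFRAMES_SET(3);
 *
 *	err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *				    QMAN_VOLATILE_FLAG_FINISH, vdqcr);
 *
 * This sketch assumes fq is parked or retired, as checked above; dequeued
 * frames arrive through the FQ's DQRR callback, not as a return value.
 */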
static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		ret = -EBUSY;
		goto out;
	}

	eq->fqid = fq->fqid;
	eq->tag = fq_to_tag(fq);
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);
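
/*
 * Illustration only: qman_enqueue() fails when no EQCR entry is available,
 * so a hypothetical caller that must not drop a frame could retry:
 *
 *	while (qman_enqueue(fq, &fd) == -EBUSY)
 *		cpu_relax();
 *
 * Whether busy-waiting is acceptable depends on context; real callers may
 * prefer a bounded retry count, or to drop or defer the frame instead.
 */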
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))

static u8 qman_cgr_cpus[CGR_NUM];
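
/*
 * Illustration only: PORTAL_IDX() maps a software portal's channel to its
 * index, and TARG_MASK() to that portal's bit in the CGR's CSCN_TARG field
 * (bit 31 is portal 0). E.g. for the portal on channel
 * QM_CHANNEL_SWPORTAL0 + 3, PORTAL_IDX() is 3 and TARG_MASK() is
 * BIT(31) >> 3 == 0x10000000.
 */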
void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
			local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
		else
			/* Overwrite TARG */
			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
						   TARG_MASK(p);
		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
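
/*
 * Illustration only: a hypothetical caller creating a CGR with an initial
 * congestion-state threshold and a notification callback. Option and helper
 * names follow this driver's public header (qman.h) but should be verified
 * against it before use.
 *
 *	struct qm_mcc_initcgr opts = {};
 *	struct qman_cgr my_cgr = {
 *		.cgrid = my_cgrid,	// e.g. from qman_alloc_cgrid_range()
 *		.cb = my_cscn_cb,	// invoked on congestion state changes
 *	};
 *
 *	opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, my_threshold, 1);
 *	err = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */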
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
					   ~TARG_MASK(p);

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
static int qman_delete_cgr_thread(void *p)
{
	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
	int ret;

	ret = qman_delete_cgr(cgr_comp->cgr);
	complete(&cgr_comp->completion);

	return ret;
}
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	struct task_struct *thread;
	struct cgr_comp cgr_comp;

	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		init_completion(&cgr_comp.completion);
		cgr_comp.cgr = cgr;
		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
					"cgr_del");

		if (IS_ERR(thread))
			goto out;

		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
		wake_up_process(thread);
		wait_for_completion(&cgr_comp.completion);
		return;
	}
out:
	qman_delete_cgr(cgr);
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
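
/*
 * Design note: qman_delete_cgr() must run against the portal that created
 * the CGR (it rejects other portals via the cgr->chan check above). When the
 * caller is on a different CPU, qman_delete_cgr_safe() therefore bounces the
 * work to a kthread bound to the creator CPU recorded in qman_cgr_cpus[] and
 * waits for its completion, so the deletion always executes on the correct
 * affine portal.
 */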
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}
static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (dqrr->fqid == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}
#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)
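
/*
 * Illustration only: the token-pasting wrappers above just select the verb
 * or stat bit, e.g. qm_mr_drain(&p->p, FQRN) expands to
 * _qm_mr_consume_and_match_verb(&p->p, QM_MR_VERB_FQRN), and
 * qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY) waits for (and consumes up to) a
 * DQRR entry for fqid with QM_DQRR_STAT_FQ_EMPTY set.
 */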
static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	mcc->queryfq_np.fqid = fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do anymore checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		mcc->alterfq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV(channel));
			do {
				/* Keep draining DQRR while checking the MR*/
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);
		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		mcc->alterfq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		mcc->alterfq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */
static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
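
/*
 * Illustration only: a hypothetical user allocating a single FQID from the
 * genalloc-backed range and handing it back when done:
 *
 *	u32 fqid;
 *
 *	if (!qman_alloc_fqid_range(&fqid, 1)) {
 *		// ... create/init/use an FQ on fqid ...
 *		qman_release_fqid(fqid);	// shuts the FQ down first
 *	}
 *
 * qman_release_fqid() (below) refuses to recycle the ID if the shutdown
 * fails, deliberately leaking it instead.
 */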
int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);
static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel
	 * is the pool-channel being released.
	 * When a non-OOS FQD is found we attempt to clean it up
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}
int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);
static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}
int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);