/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Slow API Set - UCC Slow specific routines implementations.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>

#include <asm/ucc.h>
#include <asm/ucc_slow.h>
#define uccs_printk(level, format, arg...) \
	printk(level format "\n", ## arg)

#define uccs_dbg(format, arg...) \
	uccs_printk(KERN_DEBUG , format , ## arg)
#define uccs_err(format, arg...) \
	uccs_printk(KERN_ERR , format , ## arg)
#define uccs_info(format, arg...) \
	uccs_printk(KERN_INFO , format , ## arg)
#define uccs_warn(format, arg...) \
	uccs_printk(KERN_WARNING , format , ## arg)
#ifdef UCCS_VERBOSE_DEBUG
#define uccs_vdbg uccs_dbg
#else
#define uccs_vdbg(fmt, args...) do { } while (0)
#endif /* UCCS_VERBOSE_DEBUG */
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
	switch (uccs_num) {
	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
{
	out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
}
void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
			QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 1;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 0;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
{
	u32 i;
	struct ucc_slow *us_regs;
	u32 gumr;
	u8 function_code = 0;
	u8 *bd;
	struct ucc_slow_private *uccs;
	u32 id;
	u32 command;
	int ret;

	uccs_vdbg("%s: IN", __FUNCTION__);

	if (!us_info)
		return -EINVAL;

	/* Check that the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
		uccs_err("ucc_slow_init: Illegal UCC number!");
		return -EINVAL;
	}

	/*
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that the QE accepts one byte at a time, unlike
	 * the normal case when the QE accepts 32 bits at a time.
	 */
	if ((!us_info->rfw) &&
		(us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
		uccs_err("max_rx_buf_length not aligned.");
		return -EINVAL;
	}

	uccs = (struct ucc_slow_private *)
		kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
	if (!uccs) {
		uccs_err
		    ("ucc_slow_init: No memory for UCC slow data structure!");
		return -ENOMEM;
	}
	memset(uccs, 0, sizeof(struct ucc_slow_private));

	/* Fill slow UCC structure */
	uccs->us_info = us_info;
	uccs->saved_uccm = 0;
	uccs->p_rx_frame = 0;
	uccs->us_regs = us_info->us_regs;
	us_regs = uccs->us_regs;
	uccs->p_ucce = (u16 *) & (us_regs->ucce);
	uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
	uccs->rx_frames = 0;
	uccs->tx_frames = 0;
	uccs->rx_discarded = 0;
#endif /* STATISTICS */

	/* Get PRAM base */
	uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE,
					      ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (IS_MURAM_ERR(uccs->us_pram_offset)) {
		uccs_err
		    ("ucc_slow_init: Cannot allocate MURAM memory "
		     "for Slow UCC.");
		ucc_slow_free(uccs);
		return -ENOMEM;
	}
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
			(u32) uccs->us_pram_offset);

	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);

	/* Init Guemr register */
	if ((ret = ucc_init_guemr((struct ucc_common *) (us_info->us_regs)))) {
		uccs_err("ucc_slow_init: Could not init the guemr register.");
		ucc_slow_free(uccs);
		return ret;
	}

	/* Set UCC to slow type */
	if ((ret = ucc_set_type(us_info->ucc_num,
				(struct ucc_common *) (us_info->us_regs),
				UCC_SPEED_TYPE_SLOW))) {
		uccs_err("ucc_slow_init: Could not set the UCC to slow type.");
		ucc_slow_free(uccs);
		return ret;
	}

	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);

	INIT_LIST_HEAD(&uccs->confQ);

	/* Allocate BDs. */
	uccs->rx_base_offset =
		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
				QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->rx_base_offset)) {
		uccs_err("ucc_slow_init: No memory for Rx BD's.");
		uccs->rx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	uccs->tx_base_offset =
		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
				QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->tx_base_offset)) {
		uccs_err("ucc_slow_init: No memory for Tx BD's.");
		uccs->tx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	/* Init Tx bds */
	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
	for (i = 0; i < us_info->tx_bd_ring_len; i++) {
		/* clear bd buffer */
		out_be32(&(((struct qe_bd *)bd)->buf), 0);
		/* set bd status and length */
		out_be32((u32 *)bd, 0);
		bd += sizeof(struct qe_bd);
	}
	bd -= sizeof(struct qe_bd);
	/* set bd status and length */
	out_be32((u32 *)bd, T_W);	/* for last BD set Wrap bit */

	/* Init Rx bds */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len; i++) {
		/* set bd status and length */
		out_be32((u32 *)bd, 0);
		/* clear bd buffer */
		out_be32(&(((struct qe_bd *)bd)->buf), 0);
		bd += sizeof(struct qe_bd);
	}
	bd -= sizeof(struct qe_bd);
	/* set bd status and length */
	out_be32((u32 *)bd, R_W);	/* for last BD set Wrap bit */

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = 0;
	gumr |= us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	out_be32(&us_regs->gumr_h, gumr);

	/* gumr_l */
	gumr = 0;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	gumr |= us_info->tdcr;
	gumr |= us_info->rdcr;
	gumr |= us_info->tenc;
	gumr |= us_info->renc;
	gumr |= us_info->diag;
	gumr |= us_info->mode;
	out_be32(&us_regs->gumr_l, gumr);

	/* Function code registers */
	/* function_code has initial value 0 */

	/* if the data is in cachable memory, the 'global' bit */
	/* in the function code should be set. */
	function_code |= us_info->data_mem_part;
	function_code |= QE_BMR_BYTE_ORDER_BO_MOT;	/* Required for QE */
	uccs->us_pram->tfcr = function_code;
	uccs->us_pram->rfcr = function_code;

	/* rbase, tbase are offsets from MURAM base */
	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			uccs_err("ucc_slow_init: Illegal value for parameter"
				 " 'RxClock'.");
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			uccs_err("ucc_slow_init: Illegal value for parameter"
				 " 'TxClock'.");
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/*
	 * INTERRUPTS
	 */
	/* Set interrupt mask register at UCC level. */
	out_be16(&us_regs->uccm, us_info->uccm_mask);

	/* First, clear anything pending at UCC level, */
	/* otherwise, old garbage may come through */
	/* as soon as the dam is opened. */

	/* Writing '1' clears */
	out_be16(&us_regs->ucce, 0xffff);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);

	*uccs_ret = uccs;
	return 0;
}

void ucc_slow_free(struct ucc_slow_private * uccs)
{
	if (!uccs)
		return;

	if (uccs->rx_base_offset)
		qe_muram_free(uccs->rx_base_offset);

	if (uccs->tx_base_offset)
		qe_muram_free(uccs->tx_base_offset);

	if (uccs->us_pram) {
		qe_muram_free(uccs->us_pram_offset);
		uccs->us_pram = NULL;
	}

	kfree(uccs);
}
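
/*
 * Illustrative only: a minimal sketch (kept out of the build with "#if 0")
 * of how a caller might drive this API.  The ucc_slow_info instance and the
 * names example_slow_uart_info/example_slow_ucc_bringup are hypothetical,
 * board-specific pieces that would normally come from platform code; error
 * handling is reduced to the essentials.
 */
#if 0
static struct ucc_slow_info example_slow_uart_info;	/* filled in elsewhere */

static int example_slow_ucc_bringup(void)
{
	struct ucc_slow_private *uccs;
	int ret;

	ret = ucc_slow_init(&example_slow_uart_info, &uccs);
	if (ret)
		return ret;

	/* Turn on the transmitter and receiver once protocol setup is done. */
	ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);

	/* ... exchange data, e.g. kick the transmitter by hand ... */
	ucc_slow_poll_transmitter_now(uccs);

	/* Shut the channel down and release MURAM and memory. */
	ucc_slow_graceful_stop_tx(uccs);
	ucc_slow_disable(uccs, COMM_DIR_RX | COMM_DIR_TX);
	ucc_slow_free(uccs);

	return 0;
}
#endif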