1 /*******************************************************************************
2 This contains the functions to handle the enhanced descriptors.
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23 *******************************************************************************/
25 #include <linux/stmmac.h>
27 #include "descs_com.h"
29 static int enh_desc_get_tx_status(void *data
, struct stmmac_extra_stats
*x
,
30 struct dma_desc
*p
, void __iomem
*ioaddr
)
33 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
35 if (unlikely(p
->des01
.etx
.error_summary
)) {
36 CHIP_DBG(KERN_ERR
"GMAC TX error... 0x%08x\n", p
->des01
.etx
);
37 if (unlikely(p
->des01
.etx
.jabber_timeout
)) {
38 CHIP_DBG(KERN_ERR
"\tjabber_timeout error\n");
42 if (unlikely(p
->des01
.etx
.frame_flushed
)) {
43 CHIP_DBG(KERN_ERR
"\tframe_flushed error\n");
44 x
->tx_frame_flushed
++;
45 dwmac_dma_flush_tx_fifo(ioaddr
);
48 if (unlikely(p
->des01
.etx
.loss_carrier
)) {
49 CHIP_DBG(KERN_ERR
"\tloss_carrier error\n");
51 stats
->tx_carrier_errors
++;
53 if (unlikely(p
->des01
.etx
.no_carrier
)) {
54 CHIP_DBG(KERN_ERR
"\tno_carrier error\n");
56 stats
->tx_carrier_errors
++;
58 if (unlikely(p
->des01
.etx
.late_collision
)) {
59 CHIP_DBG(KERN_ERR
"\tlate_collision error\n");
60 stats
->collisions
+= p
->des01
.etx
.collision_count
;
62 if (unlikely(p
->des01
.etx
.excessive_collisions
)) {
63 CHIP_DBG(KERN_ERR
"\texcessive_collisions\n");
64 stats
->collisions
+= p
->des01
.etx
.collision_count
;
66 if (unlikely(p
->des01
.etx
.excessive_deferral
)) {
67 CHIP_DBG(KERN_INFO
"\texcessive tx_deferral\n");
71 if (unlikely(p
->des01
.etx
.underflow_error
)) {
72 CHIP_DBG(KERN_ERR
"\tunderflow error\n");
73 dwmac_dma_flush_tx_fifo(ioaddr
);
77 if (unlikely(p
->des01
.etx
.ip_header_error
)) {
78 CHIP_DBG(KERN_ERR
"\tTX IP header csum error\n");
79 x
->tx_ip_header_error
++;
82 if (unlikely(p
->des01
.etx
.payload_error
)) {
83 CHIP_DBG(KERN_ERR
"\tAddr/Payload csum error\n");
84 x
->tx_payload_error
++;
85 dwmac_dma_flush_tx_fifo(ioaddr
);
91 if (unlikely(p
->des01
.etx
.deferred
)) {
92 CHIP_DBG(KERN_INFO
"GMAC TX status: tx deferred\n");
95 #ifdef STMMAC_VLAN_TAG_USED
96 if (p
->des01
.etx
.vlan_frame
) {
97 CHIP_DBG(KERN_INFO
"GMAC TX status: VLAN frame\n");
105 static int enh_desc_get_tx_len(struct dma_desc
*p
)
107 return p
->des01
.etx
.buffer1_size
;
110 static int enh_desc_coe_rdes0(int ipc_err
, int type
, int payload_err
)
112 int ret
= good_frame
;
113 u32 status
= (type
<< 2 | ipc_err
<< 1 | payload_err
) & 0x7;
115 /* bits 5 7 0 | Frame status
116 * ----------------------------------------------------------
117 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
118 * 1 0 0 | IPv4/6 No CSUM errorS.
119 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
120 * 1 1 0 | IPv4/6 CSUM IP HR error
121 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
122 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
123 * 0 1 1 | COE bypassed.. no IPv4/6 frame
127 CHIP_DBG(KERN_INFO
"RX Des0 status: IEEE 802.3 Type frame.\n");
129 } else if (status
== 0x4) {
130 CHIP_DBG(KERN_INFO
"RX Des0 status: IPv4/6 No CSUM errorS.\n");
132 } else if (status
== 0x5) {
133 CHIP_DBG(KERN_ERR
"RX Des0 status: IPv4/6 Payload Error.\n");
135 } else if (status
== 0x6) {
136 CHIP_DBG(KERN_ERR
"RX Des0 status: IPv4/6 Header Error.\n");
138 } else if (status
== 0x7) {
140 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
142 } else if (status
== 0x1) {
144 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
146 } else if (status
== 0x3) {
147 CHIP_DBG(KERN_ERR
"RX Des0 status: No IPv4, IPv6 frame.\n");
153 static int enh_desc_get_rx_status(void *data
, struct stmmac_extra_stats
*x
,
156 int ret
= good_frame
;
157 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
159 if (unlikely(p
->des01
.erx
.error_summary
)) {
160 CHIP_DBG(KERN_ERR
"GMAC RX Error Summary 0x%08x\n",
162 if (unlikely(p
->des01
.erx
.descriptor_error
)) {
163 CHIP_DBG(KERN_ERR
"\tdescriptor error\n");
165 stats
->rx_length_errors
++;
167 if (unlikely(p
->des01
.erx
.overflow_error
)) {
168 CHIP_DBG(KERN_ERR
"\toverflow error\n");
169 x
->rx_gmac_overflow
++;
172 if (unlikely(p
->des01
.erx
.ipc_csum_error
))
173 CHIP_DBG(KERN_ERR
"\tIPC Csum Error/Giant frame\n");
175 if (unlikely(p
->des01
.erx
.late_collision
)) {
176 CHIP_DBG(KERN_ERR
"\tlate_collision error\n");
180 if (unlikely(p
->des01
.erx
.receive_watchdog
)) {
181 CHIP_DBG(KERN_ERR
"\treceive_watchdog error\n");
184 if (unlikely(p
->des01
.erx
.error_gmii
)) {
185 CHIP_DBG(KERN_ERR
"\tReceive Error\n");
188 if (unlikely(p
->des01
.erx
.crc_error
)) {
189 CHIP_DBG(KERN_ERR
"\tCRC error\n");
191 stats
->rx_crc_errors
++;
196 /* After a payload csum error, the ES bit is set.
197 * It doesn't match with the information reported into the databook.
198 * At any rate, we need to understand if the CSUM hw computation is ok
199 * and report this info to the upper layers. */
200 ret
= enh_desc_coe_rdes0(p
->des01
.erx
.ipc_csum_error
,
201 p
->des01
.erx
.frame_type
, p
->des01
.erx
.payload_csum_error
);
203 if (unlikely(p
->des01
.erx
.dribbling
)) {
204 CHIP_DBG(KERN_ERR
"GMAC RX: dribbling error\n");
207 if (unlikely(p
->des01
.erx
.sa_filter_fail
)) {
208 CHIP_DBG(KERN_ERR
"GMAC RX : Source Address filter fail\n");
209 x
->sa_rx_filter_fail
++;
212 if (unlikely(p
->des01
.erx
.da_filter_fail
)) {
213 CHIP_DBG(KERN_ERR
"GMAC RX : Dest Address filter fail\n");
214 x
->da_rx_filter_fail
++;
217 if (unlikely(p
->des01
.erx
.length_error
)) {
218 CHIP_DBG(KERN_ERR
"GMAC RX: length_error error\n");
222 #ifdef STMMAC_VLAN_TAG_USED
223 if (p
->des01
.erx
.vlan_tag
) {
224 CHIP_DBG(KERN_INFO
"GMAC RX: VLAN frame tagged\n");
231 static void enh_desc_init_rx_desc(struct dma_desc
*p
, unsigned int ring_size
,
235 for (i
= 0; i
< ring_size
; i
++) {
236 p
->des01
.erx
.own
= 1;
237 p
->des01
.erx
.buffer1_size
= BUF_SIZE_8KiB
- 1;
239 ehn_desc_rx_set_on_ring_chain(p
, (i
== ring_size
- 1));
242 p
->des01
.erx
.disable_ic
= 1;
247 static void enh_desc_init_tx_desc(struct dma_desc
*p
, unsigned int ring_size
)
251 for (i
= 0; i
< ring_size
; i
++) {
252 p
->des01
.etx
.own
= 0;
253 ehn_desc_tx_set_on_ring_chain(p
, (i
== ring_size
- 1));
258 static int enh_desc_get_tx_owner(struct dma_desc
*p
)
260 return p
->des01
.etx
.own
;
263 static int enh_desc_get_rx_owner(struct dma_desc
*p
)
265 return p
->des01
.erx
.own
;
268 static void enh_desc_set_tx_owner(struct dma_desc
*p
)
270 p
->des01
.etx
.own
= 1;
273 static void enh_desc_set_rx_owner(struct dma_desc
*p
)
275 p
->des01
.erx
.own
= 1;
278 static int enh_desc_get_tx_ls(struct dma_desc
*p
)
280 return p
->des01
.etx
.last_segment
;
283 static void enh_desc_release_tx_desc(struct dma_desc
*p
)
285 int ter
= p
->des01
.etx
.end_ring
;
287 memset(p
, 0, offsetof(struct dma_desc
, des2
));
288 enh_desc_end_tx_desc(p
, ter
);
291 static void enh_desc_prepare_tx_desc(struct dma_desc
*p
, int is_fs
, int len
,
294 p
->des01
.etx
.first_segment
= is_fs
;
296 enh_set_tx_desc_len(p
, len
);
298 if (likely(csum_flag
))
299 p
->des01
.etx
.checksum_insertion
= cic_full
;
302 static void enh_desc_clear_tx_ic(struct dma_desc
*p
)
304 p
->des01
.etx
.interrupt
= 0;
307 static void enh_desc_close_tx_desc(struct dma_desc
*p
)
309 p
->des01
.etx
.last_segment
= 1;
310 p
->des01
.etx
.interrupt
= 1;
313 static int enh_desc_get_rx_frame_len(struct dma_desc
*p
, int rx_coe_type
)
315 /* The type-1 checksum offload engines append the checksum at
316 * the end of frame and the two bytes of checksum are added in
318 * Adjust for that in the framelen for type-1 checksum offload
320 if (rx_coe_type
== STMMAC_RX_COE_TYPE1
)
321 return p
->des01
.erx
.frame_length
- 2;
323 return p
->des01
.erx
.frame_length
;
326 const struct stmmac_desc_ops enh_desc_ops
= {
327 .tx_status
= enh_desc_get_tx_status
,
328 .rx_status
= enh_desc_get_rx_status
,
329 .get_tx_len
= enh_desc_get_tx_len
,
330 .init_rx_desc
= enh_desc_init_rx_desc
,
331 .init_tx_desc
= enh_desc_init_tx_desc
,
332 .get_tx_owner
= enh_desc_get_tx_owner
,
333 .get_rx_owner
= enh_desc_get_rx_owner
,
334 .release_tx_desc
= enh_desc_release_tx_desc
,
335 .prepare_tx_desc
= enh_desc_prepare_tx_desc
,
336 .clear_tx_ic
= enh_desc_clear_tx_ic
,
337 .close_tx_desc
= enh_desc_close_tx_desc
,
338 .get_tx_ls
= enh_desc_get_tx_ls
,
339 .set_tx_owner
= enh_desc_set_tx_owner
,
340 .set_rx_owner
= enh_desc_set_rx_owner
,
341 .get_rx_frame_len
= enh_desc_get_rx_frame_len
,