/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 *********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"
#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif
static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE :
				 skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}
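
/*
 * Worked example of the adjustment above (a sketch of the intended
 * accounting, inferred from the reg0/size0/subone0 setup in
 * cvm_oct_xmit below): PKO decrements the FAU register once per
 * transmitted packet, so a register value of -3 means three SKBs are
 * done and waiting to be freed.  The caller fetches with
 * cvmx_fau_fetch_and_add32(fau, MAX_SKB_TO_FREE), leaving the register
 * at MAX_SKB_TO_FREE - 3; undo is then MAX_SKB_TO_FREE - 3, the atomic
 * add returns the register to zero, and 3 is returned as the number of
 * SKBs that may be freed now.
 */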
static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);

		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			while (skb_to_free > 0) {
				struct sk_buff *t =
					__skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;
				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_freed >= 0 && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}
/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns Always returns NETDEV_TX_OK
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure.  It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}
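
	/*
	 * Note on the asynchronous path: the two IOBDMA fetch-and-adds
	 * above deposit their results in the core-local scratchpad at
	 * CVMX_SCR_SCRATCH and CVMX_SCR_SCRATCH + 8.  The values are
	 * not valid until a CVMX_SYNCIOBDMA executes, which is why
	 * every consumer below syncs before cvmx_scratch_read64().
	 */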
	/*
	 * We have space for 6 segment pointers. If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/* Get the number of skbuffs in use by the hardware */
				CVMX_SYNCIOBDMA;
				skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/* Get the number of skbuffs in use by the hardware */
				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
								       MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
								 priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}
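
	/*
	 * For example, a 60 byte frame on a half-duplex CN3XXX port
	 * gets add_bytes = 4 bytes of zero padding appended, bringing
	 * it to 64 bytes (assuming the skb has that much tailroom,
	 * which the kernel normally provides).
	 */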
	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;
	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}
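
	/*
	 * The gather list built above lives in skb->cb (via
	 * CVM_OCT_SKB_CB): entry 0 describes the linear area
	 * (skb_headlen bytes) and entries 1..nr_frags describe the
	 * page fragments.  skb->cb is 48 bytes, i.e. room for six u64
	 * buffer pointers, which is why more than 5 fragments forces
	 * the __skb_linearize() call earlier in this function.
	 */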
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen.  If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA.  We don't need the FAU
	 * update anymore
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * cleanup a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
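
	/*
	 * Sketch of the reuse scheme above: fpa_head is a 128 byte
	 * aligned address at least 128 bytes past skb->head, matching
	 * the layout the RX side uses for FPA packet buffers.  Storing
	 * the skb pointer just below fpa_head lets the buffer be
	 * mapped back to its struct sk_buff when the hardware later
	 * hands the buffer out again, so the skb is recycled instead
	 * of freed.
	 */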
dont_put_skbuff_in_hw:
	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
		|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}
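
	/*
	 * ipoffp1 appears to be the IP header offset plus one: zero
	 * disables the hardware checksum engine, hence the bias.  For
	 * an untagged Ethernet frame the IP header begins at byte 14
	 * (sizeof(struct ethhdr)), so the value written above is 15.
	 */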
	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);
	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);
	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core.  */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing.  */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;
	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}
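
	/*
	 * To recap the three outcomes: QUEUE_DROP chains the skb onto
	 * to_free_list so it is freed below, QUEUE_HW lets PKO return
	 * the buffer to the FPA pool itself (only the FAU counter is
	 * adjusted), and QUEUE_CORE parks the skb on tx_free_list[qos]
	 * until the FAU counter shows the hardware is done with it.
	 */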
	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}
	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean =
		    cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet every 1024 packets for
		 * the pathological case of high traffic on one port
		 * delaying clean up of packets on a different port
		 * that is blocked waiting for the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}
/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns Always returns zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * matter.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
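
	/*
	 * For example, assuming a CVMX_HELPER_FIRST_MBUFF_SKIP of 184
	 * bytes (illustrative only; the real value is whatever the
	 * helper was configured with): copy_location becomes
	 * packet_buffer + 8 + 184 + 6 = packet_buffer + 198, putting
	 * the IP header at offset 212 and the IP source/destination
	 * addresses together in one 64-bit word at offset 224.
	 */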
	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);
	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;
	/* Default to zero. Sets to zero later are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
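
	/*
	 * packet_ptr.s.back is measured in 128 byte units: it tells
	 * the hardware how far back from the data address the start
	 * of the FPA buffer lies, so the buffer can be freed to the
	 * pool correctly.  The >> 7 converts the byte offset
	 * (copy_location - packet_buffer) into that unit.
	 */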
	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP)
		    || (ip_hdr(skb)->protocol == IPPROTO_UDP);
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}
/**
 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
 * @dev:    Device being shutdown
 *
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}
static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}
static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet.  */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}
void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}
void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}