allow coexistence of N build and AC build.
[tomato.git] release/src-rt-6.x/linux/linux-2.6/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
/*

  Broadcom BCM43xx wireless driver

  PIO Transmission

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "bcm43xx.h"
#include "bcm43xx_pio.h"
#include "bcm43xx_main.h"
#include "bcm43xx_xmit.h"
#include "bcm43xx_power.h"

#include <linux/delay.h>

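/* Initialize the PIO controller for transmission of a new frame. */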
static void tx_start(struct bcm43xx_pioqueue *queue)
{
	bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
			  BCM43xx_PIO_TXCTL_INIT);
}

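/* Write a single trailing octet to the TX data register.
 * Cores that need workarounds (rev < 3) want the data written before
 * the control word, newer cores the other way around. */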
static void tx_octet(struct bcm43xx_pioqueue *queue,
		     u8 octet)
{
	if (queue->need_workarounds) {
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
				  octet);
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
				  BCM43xx_PIO_TXCTL_WRITELO);
	} else {
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
				  BCM43xx_PIO_TXCTL_WRITELO);
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
				  octet);
	}
}

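/* Fetch the next 16-bit word to transmit. Words are taken from the
 * TX header first and then from the packet data, at offset *pos. */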
static u16 tx_get_next_word(struct bcm43xx_txhdr *txhdr,
			    const u8 *packet,
			    unsigned int *pos)
{
	const u8 *source;
	unsigned int i = *pos;
	u16 ret;

	if (i < sizeof(*txhdr)) {
		source = (const u8 *)txhdr;
	} else {
		source = packet;
		i -= sizeof(*txhdr);
	}
	ret = le16_to_cpu( *((u16 *)(source + i)) );
	*pos += 2;

	return ret;
}

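/* Stream the TX header and the packet payload to the device as
 * 16-bit words. A trailing odd octet is sent through tx_octet(). */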
static void tx_data(struct bcm43xx_pioqueue *queue,
		    struct bcm43xx_txhdr *txhdr,
		    const u8 *packet,
		    unsigned int octets)
{
	u16 data;
	unsigned int i = 0;

	if (queue->need_workarounds) {
		data = tx_get_next_word(txhdr, packet, &i);
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
	}
	bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
			  BCM43xx_PIO_TXCTL_WRITELO |
			  BCM43xx_PIO_TXCTL_WRITEHI);
	while (i < octets - 1) {
		data = tx_get_next_word(txhdr, packet, &i);
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
	}
	if (octets % 2)
		tx_octet(queue, packet[octets - sizeof(*txhdr) - 1]);
}

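/* Write the final data octet (on cores needing workarounds) and tell
 * the controller that the frame is complete. */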
static void tx_complete(struct bcm43xx_pioqueue *queue,
			struct sk_buff *skb)
{
	if (queue->need_workarounds) {
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
				  skb->data[skb->len - 1]);
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
				  BCM43xx_PIO_TXCTL_WRITELO |
				  BCM43xx_PIO_TXCTL_COMPLETE);
	} else {
		bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
				  BCM43xx_PIO_TXCTL_COMPLETE);
	}
}

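/* Encode the PIO queue and the packet-cache slot into a 16-bit cookie,
 * which the hardware echoes back in the transmit status report. */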
static u16 generate_cookie(struct bcm43xx_pioqueue *queue,
			   struct bcm43xx_pio_txpacket *packet)
{
	u16 cookie = 0x0000;
	int packetindex;

	/* We use the upper 4 bits for the PIO
	 * controller ID and the lower 12 bits
	 * for the packet index (in the cache).
	 */
	switch (queue->mmio_base) {
	case BCM43xx_MMIO_PIO1_BASE:
		break;
	case BCM43xx_MMIO_PIO2_BASE:
		cookie = 0x1000;
		break;
	case BCM43xx_MMIO_PIO3_BASE:
		cookie = 0x2000;
		break;
	case BCM43xx_MMIO_PIO4_BASE:
		cookie = 0x3000;
		break;
	default:
		assert(0);
	}
	packetindex = pio_txpacket_getindex(packet);
	assert(((u16)packetindex & 0xF000) == 0x0000);
	cookie |= (u16)packetindex;

	return cookie;
}

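/* Map a transmit-status cookie back to its PIO queue and packet slot. */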
static
struct bcm43xx_pioqueue * parse_cookie(struct bcm43xx_private *bcm,
				       u16 cookie,
				       struct bcm43xx_pio_txpacket **packet)
{
	struct bcm43xx_pio *pio = bcm43xx_current_pio(bcm);
	struct bcm43xx_pioqueue *queue = NULL;
	int packetindex;

	switch (cookie & 0xF000) {
	case 0x0000:
		queue = pio->queue0;
		break;
	case 0x1000:
		queue = pio->queue1;
		break;
	case 0x2000:
		queue = pio->queue2;
		break;
	case 0x3000:
		queue = pio->queue3;
		break;
	default:
		assert(0);
	}
	packetindex = (cookie & 0x0FFF);
	assert(packetindex >= 0 && packetindex < BCM43xx_PIO_MAXTXPACKETS);
	*packet = &(queue->tx_packets_cache[packetindex]);

	return queue;
}

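/* Push one fragment (TX header followed by the skb data) through the
 * PIO data registers. */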
static void pio_tx_write_fragment(struct bcm43xx_pioqueue *queue,
				  struct sk_buff *skb,
				  struct bcm43xx_pio_txpacket *packet)
{
	struct bcm43xx_txhdr txhdr;
	unsigned int octets;

	assert(skb_shinfo(skb)->nr_frags == 0);
	bcm43xx_generate_txhdr(queue->bcm,
			       &txhdr, skb->data, skb->len,
			       (packet->xmitted_frags == 0),
			       generate_cookie(queue, packet));

	tx_start(queue);
	octets = skb->len + sizeof(txhdr);
	if (queue->need_workarounds)
		octets--;
	tx_data(queue, &txhdr, (u8 *)skb->data, octets);
	tx_complete(queue, skb);
}

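/* Return a packet slot to the free list and release its
 * device-queue accounting. */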
static void free_txpacket(struct bcm43xx_pio_txpacket *packet,
			  int irq_context)
{
	struct bcm43xx_pioqueue *queue = packet->queue;

	ieee80211_txb_free(packet->txb);
	list_move(&packet->list, &queue->txfree);
	queue->nr_txfree++;

	assert(queue->tx_devq_used >= packet->xmitted_octets);
	assert(queue->tx_devq_packets >= packet->xmitted_frags);
	queue->tx_devq_used -= packet->xmitted_octets;
	queue->tx_devq_packets -= packet->xmitted_frags;
}

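/* Transmit as many fragments of the packet as currently fit into the
 * device TX queue. Returns -EBUSY if the device queue is full, so the
 * TX tasklet can retry later. */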
static int pio_tx_packet(struct bcm43xx_pio_txpacket *packet)
{
	struct bcm43xx_pioqueue *queue = packet->queue;
	struct ieee80211_txb *txb = packet->txb;
	struct sk_buff *skb;
	u16 octets;
	int i;

	for (i = packet->xmitted_frags; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];

		octets = (u16)skb->len + sizeof(struct bcm43xx_txhdr);
		assert(queue->tx_devq_size >= octets);
		assert(queue->tx_devq_packets <= BCM43xx_PIO_MAXTXDEVQPACKETS);
		assert(queue->tx_devq_used <= queue->tx_devq_size);
		/* Check if there is sufficient free space on the device
		 * TX queue. If not, return and let the TX tasklet
		 * retry later.
		 */
		if (queue->tx_devq_packets == BCM43xx_PIO_MAXTXDEVQPACKETS)
			return -EBUSY;
		if (queue->tx_devq_used + octets > queue->tx_devq_size)
			return -EBUSY;
		/* Now poke the device. */
		pio_tx_write_fragment(queue, skb, packet);

		/* Account for the packet size.
		 * (We must not overflow the device TX queue)
		 */
		queue->tx_devq_packets++;
		queue->tx_devq_used += octets;

		assert(packet->xmitted_frags < packet->txb->nr_frags);
		packet->xmitted_frags++;
		packet->xmitted_octets += octets;
	}
	list_move_tail(&packet->list, &queue->txrunning);

	return 0;
}

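/* TX tasklet: walk the software txqueue and feed packets to the device,
 * unless the queue is frozen or the controller is suspended. */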
static void tx_tasklet(unsigned long d)
{
	struct bcm43xx_pioqueue *queue = (struct bcm43xx_pioqueue *)d;
	struct bcm43xx_private *bcm = queue->bcm;
	unsigned long flags;
	struct bcm43xx_pio_txpacket *packet, *tmp_packet;
	int err;
	u16 txctl;

	spin_lock_irqsave(&bcm->irq_lock, flags);

	if (queue->tx_frozen)
		goto out_unlock;
	txctl = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
	if (txctl & BCM43xx_PIO_TXCTL_SUSPEND)
		goto out_unlock;

	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
		assert(packet->xmitted_frags < packet->txb->nr_frags);
		if (packet->xmitted_frags == 0) {
			int i;
			struct sk_buff *skb;

			/* Check if the device queue is big
			 * enough for every fragment. If not, drop the
			 * whole packet.
			 */
			for (i = 0; i < packet->txb->nr_frags; i++) {
				skb = packet->txb->fragments[i];
				if (unlikely(skb->len > queue->tx_devq_size)) {
					dprintkl(KERN_ERR PFX "PIO TX device queue too small. "
							      "Dropping packet.\n");
					free_txpacket(packet, 1);
					goto next_packet;
				}
			}
		}
		/* Try to transmit the packet.
		 * This may not completely succeed.
		 */
		err = pio_tx_packet(packet);
		if (err)
			break;
	next_packet:
		continue;
	}
out_unlock:
	spin_unlock_irqrestore(&bcm->irq_lock, flags);
}

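/* Put every packet slot of the cache onto the free list. */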
static void setup_txqueues(struct bcm43xx_pioqueue *queue)
{
	struct bcm43xx_pio_txpacket *packet;
	int i;

	queue->nr_txfree = BCM43xx_PIO_MAXTXPACKETS;
	for (i = 0; i < BCM43xx_PIO_MAXTXPACKETS; i++) {
		packet = &(queue->tx_packets_cache[i]);

		packet->queue = queue;
		INIT_LIST_HEAD(&packet->list);

		list_add(&packet->list, &queue->txfree);
	}
}

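/* Allocate and initialize one PIO queue for the given MMIO base address. */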
static
struct bcm43xx_pioqueue * bcm43xx_setup_pioqueue(struct bcm43xx_private *bcm,
						 u16 pio_mmio_base)
{
	struct bcm43xx_pioqueue *queue;
	u32 value;
	u16 qsize;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto out;

	queue->bcm = bcm;
	queue->mmio_base = pio_mmio_base;
	queue->need_workarounds = (bcm->current_core->rev < 3);

	INIT_LIST_HEAD(&queue->txfree);
	INIT_LIST_HEAD(&queue->txqueue);
	INIT_LIST_HEAD(&queue->txrunning);
	tasklet_init(&queue->txtask, tx_tasklet,
		     (unsigned long)queue);

	value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
	value &= ~BCM43xx_SBF_XFER_REG_BYTESWAP;
	bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value);

	qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE);
	if (qsize == 0) {
		printk(KERN_ERR PFX "ERROR: This card does not support PIO "
				    "operation mode. Please use DMA mode "
				    "(module parameter pio=0).\n");
		goto err_freequeue;
	}
	if (qsize <= BCM43xx_PIO_TXQADJUST) {
		printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n",
		       qsize);
		goto err_freequeue;
	}
	qsize -= BCM43xx_PIO_TXQADJUST;
	queue->tx_devq_size = qsize;

	setup_txqueues(queue);

out:
	return queue;

err_freequeue:
	kfree(queue);
	queue = NULL;
	goto out;
}

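/* Disable the TX tasklet and free all queued and running packets. */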
static void cancel_transfers(struct bcm43xx_pioqueue *queue)
{
	struct bcm43xx_pio_txpacket *packet, *tmp_packet;

	netif_tx_disable(queue->bcm->net_dev);
	tasklet_disable(&queue->txtask);

	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
		free_txpacket(packet, 0);
	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
		free_txpacket(packet, 0);
}

static void bcm43xx_destroy_pioqueue(struct bcm43xx_pioqueue *queue)
{
	if (!queue)
		return;

	cancel_transfers(queue);
	kfree(queue);
}

void bcm43xx_pio_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_pio *pio;

	if (!bcm43xx_using_pio(bcm))
		return;
	pio = bcm43xx_current_pio(bcm);

	bcm43xx_destroy_pioqueue(pio->queue3);
	pio->queue3 = NULL;
	bcm43xx_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
	bcm43xx_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
	bcm43xx_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
}

int bcm43xx_pio_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_pio *pio = bcm43xx_current_pio(bcm);
	struct bcm43xx_pioqueue *queue;
	int err = -ENOMEM;

	queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO1_BASE);
	if (!queue)
		goto out;
	pio->queue0 = queue;

	queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO2_BASE);
	if (!queue)
		goto err_destroy0;
	pio->queue1 = queue;

	queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO3_BASE);
	if (!queue)
		goto err_destroy1;
	pio->queue2 = queue;

	queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO4_BASE);
	if (!queue)
		goto err_destroy2;
	pio->queue3 = queue;

	if (bcm->current_core->rev < 3)
		bcm->irq_savedstate |= BCM43xx_IRQ_PIO_WORKAROUND;

	dprintk(KERN_INFO PFX "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy2:
	bcm43xx_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
err_destroy1:
	bcm43xx_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
err_destroy0:
	bcm43xx_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
	goto out;
}

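/* Queue a packet for transmission on PIO queue 1 and kick the TX tasklet. */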
int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	struct bcm43xx_pioqueue *queue = bcm43xx_current_pio(bcm)->queue1;
	struct bcm43xx_pio_txpacket *packet;

	assert(!queue->tx_suspended);
	assert(!list_empty(&queue->txfree));

	packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list);
	packet->txb = txb;
	packet->xmitted_frags = 0;
	packet->xmitted_octets = 0;
	list_move_tail(&packet->list, &queue->txqueue);
	queue->nr_txfree--;
	assert(queue->nr_txfree < BCM43xx_PIO_MAXTXPACKETS);

	/* Suspend TX, if we are out of packets in the "free" queue. */
	if (list_empty(&queue->txfree)) {
		netif_stop_queue(queue->bcm->net_dev);
		queue->tx_suspended = 1;
	}

	tasklet_schedule(&queue->txtask);

	return 0;
}

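/* Handle a transmit status report: free the packet slot and wake the
 * network queue if it was suspended for lack of free slots. */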
void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_pioqueue *queue;
	struct bcm43xx_pio_txpacket *packet;

	queue = parse_cookie(bcm, status->cookie, &packet);
	assert(queue);

	free_txpacket(packet, 1);
	if (queue->tx_suspended) {
		queue->tx_suspended = 0;
		netif_wake_queue(queue->bcm->net_dev);
	}
	/* If there are packets on the txqueue, poke the tasklet
	 * to transmit them.
	 */
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}

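/* Log an RX error, write the READY bit to the RX control register and,
 * if requested, drain the RX data register with dummy reads. */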
static void pio_rx_error(struct bcm43xx_pioqueue *queue,
			 int clear_buffers,
			 const char *error)
{
	int i;

	printkl("PIO RX error: %s\n", error);
	bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL,
			  BCM43xx_PIO_RXCTL_READY);
	if (clear_buffers) {
		assert(queue->mmio_base == BCM43xx_MMIO_PIO1_BASE);
		for (i = 0; i < 15; i++) {
			/* Dummy read. */
			bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
		}
	}
}

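/* RX path: read the RX preamble, then either handle a transmit status
 * (queue 4) or copy the frame into an skb and pass it to bcm43xx_rx(). */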
void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
{
	u16 preamble[21] = { 0 };
	struct bcm43xx_rxhdr *rxhdr;
	u16 tmp, len, rxflags2;
	int i, preamble_readwords;
	struct sk_buff *skb;

	tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
	if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE))
		return;
	bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL,
			  BCM43xx_PIO_RXCTL_DATAAVAILABLE);

	for (i = 0; i < 10; i++) {
		tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
		if (tmp & BCM43xx_PIO_RXCTL_READY)
			goto data_ready;
		udelay(10);
	}
	dprintkl(KERN_ERR PFX "PIO RX timed out\n");
	return;
data_ready:

	len = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
	if (unlikely(len > 0x700)) {
		pio_rx_error(queue, 0, "len > 0x700");
		return;
	}
	if (unlikely(len == 0 && queue->mmio_base != BCM43xx_MMIO_PIO4_BASE)) {
		pio_rx_error(queue, 0, "len == 0");
		return;
	}
	preamble[0] = cpu_to_le16(len);
	if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE)
		preamble_readwords = 14 / sizeof(u16);
	else
		preamble_readwords = 18 / sizeof(u16);
	for (i = 0; i < preamble_readwords; i++) {
		tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
		preamble[i + 1] = cpu_to_le16(tmp);
	}
	rxhdr = (struct bcm43xx_rxhdr *)preamble;
	rxflags2 = le16_to_cpu(rxhdr->flags2);
	if (unlikely(rxflags2 & BCM43xx_RXHDR_FLAGS2_INVALIDFRAME)) {
		pio_rx_error(queue,
			     (queue->mmio_base == BCM43xx_MMIO_PIO1_BASE),
			     "invalid frame");
		return;
	}
	if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw;
		struct bcm43xx_xmitstatus stat;

		hw = (struct bcm43xx_hwxmitstatus *)(preamble + 1);
		stat.cookie = le16_to_cpu(hw->cookie);
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(queue->bcm, &stat);
		bcm43xx_pio_handle_xmitstatus(queue->bcm, &stat);

		return;
	}

	skb = dev_alloc_skb(len);
	if (unlikely(!skb)) {
		pio_rx_error(queue, 1, "OOM");
		return;
	}
	skb_put(skb, len);
	for (i = 0; i < len - 1; i += 2) {
		tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
		*((u16 *)(skb->data + i)) = cpu_to_le16(tmp);
	}
	if (len % 2) {
		tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
		skb->data[len - 1] = (tmp & 0x00FF);
		/* The specs say the following is required, but
		 * it is wrong and corrupts the PLCP. If we don't do
		 * this, the PLCP seems to be correct. So ifdef it out for now.
		 */
#if 0
		if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME)
			skb->data[2] = (tmp & 0xFF00) >> 8;
		else
			skb->data[0] = (tmp & 0xFF00) >> 8;
#endif
	}
	skb_trim(skb, len - IEEE80211_FCS_LEN);
	bcm43xx_rx(queue->bcm, skb, rxhdr);
}

void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue)
{
	bcm43xx_power_saving_ctl_bits(queue->bcm, -1, 1);
	bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
			  bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
			  | BCM43xx_PIO_TXCTL_SUSPEND);
}

void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
{
	bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
			  bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
			  & ~BCM43xx_PIO_TXCTL_SUSPEND);
	bcm43xx_power_saving_ctl_bits(queue->bcm, -1, -1);
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}

void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm)
{
	struct bcm43xx_pio *pio;

	assert(bcm43xx_using_pio(bcm));
	pio = bcm43xx_current_pio(bcm);
	pio->queue0->tx_frozen = 1;
	pio->queue1->tx_frozen = 1;
	pio->queue2->tx_frozen = 1;
	pio->queue3->tx_frozen = 1;
}

void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm)
{
	struct bcm43xx_pio *pio;

	assert(bcm43xx_using_pio(bcm));
	pio = bcm43xx_current_pio(bcm);
	pio->queue0->tx_frozen = 0;
	pio->queue1->tx_frozen = 0;
	pio->queue2->tx_frozen = 0;
	pio->queue3->tx_frozen = 0;
	if (!list_empty(&pio->queue0->txqueue))
		tasklet_schedule(&pio->queue0->txtask);
	if (!list_empty(&pio->queue1->txqueue))
		tasklet_schedule(&pio->queue1->txtask);
	if (!list_empty(&pio->queue2->txqueue))
		tasklet_schedule(&pio->queue2->txtask);
	if (!list_empty(&pio->queue3->txqueue))
		tasklet_schedule(&pio->queue3->txtask);
}