1 /******************************************************************************
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
30 #include <linux/etherdevice.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <net/mac80211.h>
34 #include "iwl-eeprom.h"
39 #include "iwl-helpers.h"
42 * iwl_txq_update_write_ptr - Send new write index to hardware
44 void iwl_txq_update_write_ptr(struct iwl_priv
*priv
, struct iwl_tx_queue
*txq
)
47 int txq_id
= txq
->q
.id
;
49 if (txq
->need_update
== 0)
52 /* if we're trying to save power */
53 if (test_bit(STATUS_POWER_PMI
, &priv
->status
)) {
54 /* wake up nic if it's powered down ...
55 * uCode will wake up, and interrupt us again, so next
56 * time we'll skip this part. */
57 reg
= iwl_read32(priv
, CSR_UCODE_DRV_GP1
);
59 if (reg
& CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP
) {
60 IWL_DEBUG_INFO(priv
, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
62 iwl_set_bit(priv
, CSR_GP_CNTRL
,
63 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
);
67 iwl_write_direct32(priv
, HBUS_TARG_WRPTR
,
68 txq
->q
.write_ptr
| (txq_id
<< 8));
70 /* else not in power-save mode, uCode will never sleep when we're
71 * trying to tx (during RFKILL, we're not trying to tx). */
73 iwl_write32(priv
, HBUS_TARG_WRPTR
,
74 txq
->q
.write_ptr
| (txq_id
<< 8));
78 EXPORT_SYMBOL(iwl_txq_update_write_ptr
);
81 * iwl_tx_queue_free - Deallocate DMA queue.
82 * @txq: Transmit queue to deallocate.
84 * Empty queue by removing and destroying all BD's.
86 * 0-fill, but do not free "txq" descriptor structure.
88 void iwl_tx_queue_free(struct iwl_priv
*priv
, int txq_id
)
90 struct iwl_tx_queue
*txq
= &priv
->txq
[txq_id
];
91 struct iwl_queue
*q
= &txq
->q
;
92 struct device
*dev
= &priv
->pci_dev
->dev
;
98 /* first, empty all BD's */
99 for (; q
->write_ptr
!= q
->read_ptr
;
100 q
->read_ptr
= iwl_queue_inc_wrap(q
->read_ptr
, q
->n_bd
))
101 priv
->cfg
->ops
->lib
->txq_free_tfd(priv
, txq
);
103 /* De-alloc array of command/tx buffers */
104 for (i
= 0; i
< TFD_TX_CMD_SLOTS
; i
++)
107 /* De-alloc circular buffer of TFDs */
109 dma_free_coherent(dev
, priv
->hw_params
.tfd_size
*
110 txq
->q
.n_bd
, txq
->tfds
, txq
->q
.dma_addr
);
112 /* De-alloc array of per-TFD driver data */
116 /* deallocate arrays */
122 /* 0-fill queue descriptor structure */
123 memset(txq
, 0, sizeof(*txq
));
125 EXPORT_SYMBOL(iwl_tx_queue_free
);
128 * iwl_cmd_queue_free - Deallocate DMA queue.
129 * @txq: Transmit queue to deallocate.
131 * Empty queue by removing and destroying all BD's.
133 * 0-fill, but do not free "txq" descriptor structure.
135 void iwl_cmd_queue_free(struct iwl_priv
*priv
)
137 struct iwl_tx_queue
*txq
= &priv
->txq
[priv
->cmd_queue
];
138 struct iwl_queue
*q
= &txq
->q
;
139 struct device
*dev
= &priv
->pci_dev
->dev
;
146 for (; q
->read_ptr
!= q
->write_ptr
;
147 q
->read_ptr
= iwl_queue_inc_wrap(q
->read_ptr
, q
->n_bd
)) {
148 /* we have no way to tell if it is a huge cmd ATM */
149 i
= get_cmd_index(q
, q
->read_ptr
, 0);
151 if (txq
->meta
[i
].flags
& CMD_SIZE_HUGE
) {
156 pci_unmap_single(priv
->pci_dev
,
157 dma_unmap_addr(&txq
->meta
[i
], mapping
),
158 dma_unmap_len(&txq
->meta
[i
], len
),
159 PCI_DMA_BIDIRECTIONAL
);
163 pci_unmap_single(priv
->pci_dev
,
164 dma_unmap_addr(&txq
->meta
[i
], mapping
),
165 dma_unmap_len(&txq
->meta
[i
], len
),
166 PCI_DMA_BIDIRECTIONAL
);
169 /* De-alloc array of command/tx buffers */
170 for (i
= 0; i
<= TFD_CMD_SLOTS
; i
++)
173 /* De-alloc circular buffer of TFDs */
175 dma_free_coherent(dev
, priv
->hw_params
.tfd_size
* txq
->q
.n_bd
,
176 txq
->tfds
, txq
->q
.dma_addr
);
178 /* deallocate arrays */
184 /* 0-fill queue descriptor structure */
185 memset(txq
, 0, sizeof(*txq
));
187 EXPORT_SYMBOL(iwl_cmd_queue_free
);
189 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
192 * Theory of operation
194 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
195 * of buffer descriptors, each of which points to one or more data buffers for
196 * the device to read from or fill. Driver and device exchange status of each
197 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
198 * entries in each circular buffer, to protect against confusing empty and full
201 * The device reads or writes the data in the queues via the device's several
202 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
204 * For Tx queue, there are low mark and high mark limits. If, after queuing
205 * the packet for Tx, free space become < low mark, Tx queue stopped. When
206 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
209 * See more detailed info in iwl-4965-hw.h.
210 ***************************************************/
212 int iwl_queue_space(const struct iwl_queue
*q
)
214 int s
= q
->read_ptr
- q
->write_ptr
;
216 if (q
->read_ptr
> q
->write_ptr
)
221 /* keep some reserve to not confuse empty and full situations */
227 EXPORT_SYMBOL(iwl_queue_space
);
231 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
233 static int iwl_queue_init(struct iwl_priv
*priv
, struct iwl_queue
*q
,
234 int count
, int slots_num
, u32 id
)
237 q
->n_window
= slots_num
;
240 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
241 * and iwl_queue_dec_wrap are broken. */
242 BUG_ON(!is_power_of_2(count
));
244 /* slots_num must be power-of-two size, otherwise
245 * get_cmd_index is broken. */
246 BUG_ON(!is_power_of_2(slots_num
));
248 q
->low_mark
= q
->n_window
/ 4;
252 q
->high_mark
= q
->n_window
/ 8;
253 if (q
->high_mark
< 2)
256 q
->write_ptr
= q
->read_ptr
= 0;
257 q
->last_read_ptr
= 0;
258 q
->repeat_same_read_ptr
= 0;
264 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
266 static int iwl_tx_queue_alloc(struct iwl_priv
*priv
,
267 struct iwl_tx_queue
*txq
, u32 id
)
269 struct device
*dev
= &priv
->pci_dev
->dev
;
270 size_t tfd_sz
= priv
->hw_params
.tfd_size
* TFD_QUEUE_SIZE_MAX
;
272 /* Driver private data, only for Tx (not command) queues,
273 * not shared with device. */
274 if (id
!= priv
->cmd_queue
) {
275 txq
->txb
= kzalloc(sizeof(txq
->txb
[0]) *
276 TFD_QUEUE_SIZE_MAX
, GFP_KERNEL
);
278 IWL_ERR(priv
, "kmalloc for auxiliary BD "
279 "structures failed\n");
286 /* Circular buffer of transmit frame descriptors (TFDs),
287 * shared with device */
288 txq
->tfds
= dma_alloc_coherent(dev
, tfd_sz
, &txq
->q
.dma_addr
,
291 IWL_ERR(priv
, "pci_alloc_consistent(%zd) failed\n", tfd_sz
);
306 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
308 int iwl_tx_queue_init(struct iwl_priv
*priv
, struct iwl_tx_queue
*txq
,
309 int slots_num
, u32 txq_id
)
313 int actual_slots
= slots_num
;
316 * Alloc buffer array for commands (Tx or other types of commands).
317 * For the command queue (#4/#9), allocate command space + one big
318 * command for scan, since scan command is very huge; the system will
319 * not have two scans at the same time, so only one is needed.
320 * For normal Tx queues (all other queues), no super-size command
323 if (txq_id
== priv
->cmd_queue
)
326 txq
->meta
= kzalloc(sizeof(struct iwl_cmd_meta
) * actual_slots
,
328 txq
->cmd
= kzalloc(sizeof(struct iwl_device_cmd
*) * actual_slots
,
331 if (!txq
->meta
|| !txq
->cmd
)
332 goto out_free_arrays
;
334 len
= sizeof(struct iwl_device_cmd
);
335 for (i
= 0; i
< actual_slots
; i
++) {
336 /* only happens for cmd queue */
338 len
= IWL_MAX_CMD_SIZE
;
340 txq
->cmd
[i
] = kmalloc(len
, GFP_KERNEL
);
345 /* Alloc driver data array and TFD circular buffer */
346 ret
= iwl_tx_queue_alloc(priv
, txq
, txq_id
);
350 txq
->need_update
= 0;
353 * Aggregation TX queues will get their ID when aggregation begins;
354 * they overwrite the setting done here. The command FIFO doesn't
355 * need an swq_id so don't set one to catch errors, all others can
356 * be set up to the identity mapping.
358 if (txq_id
!= priv
->cmd_queue
)
359 txq
->swq_id
= txq_id
;
361 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
362 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
363 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX
& (TFD_QUEUE_SIZE_MAX
- 1));
365 /* Initialize queue's high/low-water marks, and head/tail indexes */
366 iwl_queue_init(priv
, &txq
->q
, TFD_QUEUE_SIZE_MAX
, slots_num
, txq_id
);
368 /* Tell device where to find queue */
369 priv
->cfg
->ops
->lib
->txq_init(priv
, txq
);
373 for (i
= 0; i
< actual_slots
; i
++)
381 EXPORT_SYMBOL(iwl_tx_queue_init
);
383 void iwl_tx_queue_reset(struct iwl_priv
*priv
, struct iwl_tx_queue
*txq
,
384 int slots_num
, u32 txq_id
)
386 int actual_slots
= slots_num
;
388 if (txq_id
== priv
->cmd_queue
)
391 memset(txq
->meta
, 0, sizeof(struct iwl_cmd_meta
) * actual_slots
);
393 txq
->need_update
= 0;
395 /* Initialize queue's high/low-water marks, and head/tail indexes */
396 iwl_queue_init(priv
, &txq
->q
, TFD_QUEUE_SIZE_MAX
, slots_num
, txq_id
);
398 /* Tell device where to find queue */
399 priv
->cfg
->ops
->lib
->txq_init(priv
, txq
);
401 EXPORT_SYMBOL(iwl_tx_queue_reset
);
403 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
406 * iwl_enqueue_hcmd - enqueue a uCode command
407 * @priv: device private data point
408 * @cmd: a point to the ucode command structure
410 * The function returns < 0 values to indicate the operation is
411 * failed. On success, it turns the index (> 0) of command in the
414 int iwl_enqueue_hcmd(struct iwl_priv
*priv
, struct iwl_host_cmd
*cmd
)
416 struct iwl_tx_queue
*txq
= &priv
->txq
[priv
->cmd_queue
];
417 struct iwl_queue
*q
= &txq
->q
;
418 struct iwl_device_cmd
*out_cmd
;
419 struct iwl_cmd_meta
*out_meta
;
420 dma_addr_t phys_addr
;
425 bool is_ct_kill
= false;
427 cmd
->len
= priv
->cfg
->ops
->utils
->get_hcmd_size(cmd
->id
, cmd
->len
);
428 fix_size
= (u16
)(cmd
->len
+ sizeof(out_cmd
->hdr
));
430 /* If any of the command structures end up being larger than
431 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
432 * we will need to increase the size of the TFD entries
433 * Also, check to see if command buffer should not exceed the size
434 * of device_cmd and max_cmd_size. */
435 BUG_ON((fix_size
> TFD_MAX_PAYLOAD_SIZE
) &&
436 !(cmd
->flags
& CMD_SIZE_HUGE
));
437 BUG_ON(fix_size
> IWL_MAX_CMD_SIZE
);
439 if (iwl_is_rfkill(priv
) || iwl_is_ctkill(priv
)) {
440 IWL_WARN(priv
, "Not sending command - %s KILL\n",
441 iwl_is_rfkill(priv
) ? "RF" : "CT");
445 if (iwl_queue_space(q
) < ((cmd
->flags
& CMD_ASYNC
) ? 2 : 1)) {
446 IWL_ERR(priv
, "No space in command queue\n");
447 if (priv
->cfg
->ops
->lib
->tt_ops
.ct_kill_check
) {
449 priv
->cfg
->ops
->lib
->tt_ops
.ct_kill_check(priv
);
452 IWL_ERR(priv
, "Restarting adapter due to queue full\n");
453 queue_work(priv
->workqueue
, &priv
->restart
);
458 spin_lock_irqsave(&priv
->hcmd_lock
, flags
);
460 /* If this is a huge cmd, mark the huge flag also on the meta.flags
461 * of the _original_ cmd. This is used for DMA mapping clean up.
463 if (cmd
->flags
& CMD_SIZE_HUGE
) {
464 idx
= get_cmd_index(q
, q
->write_ptr
, 0);
465 txq
->meta
[idx
].flags
= CMD_SIZE_HUGE
;
468 idx
= get_cmd_index(q
, q
->write_ptr
, cmd
->flags
& CMD_SIZE_HUGE
);
469 out_cmd
= txq
->cmd
[idx
];
470 out_meta
= &txq
->meta
[idx
];
472 memset(out_meta
, 0, sizeof(*out_meta
)); /* re-initialize to NULL */
473 out_meta
->flags
= cmd
->flags
;
474 if (cmd
->flags
& CMD_WANT_SKB
)
475 out_meta
->source
= cmd
;
476 if (cmd
->flags
& CMD_ASYNC
)
477 out_meta
->callback
= cmd
->callback
;
479 out_cmd
->hdr
.cmd
= cmd
->id
;
480 memcpy(&out_cmd
->cmd
.payload
, cmd
->data
, cmd
->len
);
482 /* At this point, the out_cmd now has all of the incoming cmd
485 out_cmd
->hdr
.flags
= 0;
486 out_cmd
->hdr
.sequence
= cpu_to_le16(QUEUE_TO_SEQ(priv
->cmd_queue
) |
487 INDEX_TO_SEQ(q
->write_ptr
));
488 if (cmd
->flags
& CMD_SIZE_HUGE
)
489 out_cmd
->hdr
.sequence
|= SEQ_HUGE_FRAME
;
490 len
= sizeof(struct iwl_device_cmd
);
491 if (idx
== TFD_CMD_SLOTS
)
492 len
= IWL_MAX_CMD_SIZE
;
494 #ifdef CONFIG_IWLWIFI_DEBUG
495 switch (out_cmd
->hdr
.cmd
) {
496 case REPLY_TX_LINK_QUALITY_CMD
:
497 case SENSITIVITY_CMD
:
498 IWL_DEBUG_HC_DUMP(priv
, "Sending command %s (#%x), seq: 0x%04X, "
499 "%d bytes at %d[%d]:%d\n",
500 get_cmd_string(out_cmd
->hdr
.cmd
),
502 le16_to_cpu(out_cmd
->hdr
.sequence
), fix_size
,
503 q
->write_ptr
, idx
, priv
->cmd_queue
);
506 IWL_DEBUG_HC(priv
, "Sending command %s (#%x), seq: 0x%04X, "
507 "%d bytes at %d[%d]:%d\n",
508 get_cmd_string(out_cmd
->hdr
.cmd
),
510 le16_to_cpu(out_cmd
->hdr
.sequence
), fix_size
,
511 q
->write_ptr
, idx
, priv
->cmd_queue
);
514 txq
->need_update
= 1;
516 if (priv
->cfg
->ops
->lib
->txq_update_byte_cnt_tbl
)
517 /* Set up entry in queue's byte count circular buffer */
518 priv
->cfg
->ops
->lib
->txq_update_byte_cnt_tbl(priv
, txq
, 0);
520 phys_addr
= pci_map_single(priv
->pci_dev
, &out_cmd
->hdr
,
521 fix_size
, PCI_DMA_BIDIRECTIONAL
);
522 dma_unmap_addr_set(out_meta
, mapping
, phys_addr
);
523 dma_unmap_len_set(out_meta
, len
, fix_size
);
525 trace_iwlwifi_dev_hcmd(priv
, &out_cmd
->hdr
, fix_size
, cmd
->flags
);
527 priv
->cfg
->ops
->lib
->txq_attach_buf_to_tfd(priv
, txq
,
528 phys_addr
, fix_size
, 1,
531 /* Increment and update queue's write index */
532 q
->write_ptr
= iwl_queue_inc_wrap(q
->write_ptr
, q
->n_bd
);
533 iwl_txq_update_write_ptr(priv
, txq
);
535 spin_unlock_irqrestore(&priv
->hcmd_lock
, flags
);
540 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
542 * When FW advances 'R' index, all entries between old and new 'R' index
543 * need to be reclaimed. As result, some free space forms. If there is
544 * enough free space (> low mark), wake the stack that feeds us.
546 static void iwl_hcmd_queue_reclaim(struct iwl_priv
*priv
, int txq_id
,
547 int idx
, int cmd_idx
)
549 struct iwl_tx_queue
*txq
= &priv
->txq
[txq_id
];
550 struct iwl_queue
*q
= &txq
->q
;
553 if ((idx
>= q
->n_bd
) || (iwl_queue_used(q
, idx
) == 0)) {
554 IWL_ERR(priv
, "Read index for DMA queue txq id (%d), index %d, "
555 "is out of range [0-%d] %d %d.\n", txq_id
,
556 idx
, q
->n_bd
, q
->write_ptr
, q
->read_ptr
);
560 for (idx
= iwl_queue_inc_wrap(idx
, q
->n_bd
); q
->read_ptr
!= idx
;
561 q
->read_ptr
= iwl_queue_inc_wrap(q
->read_ptr
, q
->n_bd
)) {
564 IWL_ERR(priv
, "HCMD skipped: index (%d) %d %d\n", idx
,
565 q
->write_ptr
, q
->read_ptr
);
566 queue_work(priv
->workqueue
, &priv
->restart
);
573 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
574 * @rxb: Rx buffer to reclaim
576 * If an Rx buffer has an async callback associated with it the callback
577 * will be executed. The attached skb (if present) will only be freed
578 * if the callback returns 1
580 void iwl_tx_cmd_complete(struct iwl_priv
*priv
, struct iwl_rx_mem_buffer
*rxb
)
582 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
583 u16 sequence
= le16_to_cpu(pkt
->hdr
.sequence
);
584 int txq_id
= SEQ_TO_QUEUE(sequence
);
585 int index
= SEQ_TO_INDEX(sequence
);
587 bool huge
= !!(pkt
->hdr
.sequence
& SEQ_HUGE_FRAME
);
588 struct iwl_device_cmd
*cmd
;
589 struct iwl_cmd_meta
*meta
;
590 struct iwl_tx_queue
*txq
= &priv
->txq
[priv
->cmd_queue
];
592 /* If a Tx command is being handled and it isn't in the actual
593 * command queue then there a command routing bug has been introduced
594 * in the queue management code. */
595 if (WARN(txq_id
!= priv
->cmd_queue
,
596 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
597 txq_id
, priv
->cmd_queue
, sequence
,
598 priv
->txq
[priv
->cmd_queue
].q
.read_ptr
,
599 priv
->txq
[priv
->cmd_queue
].q
.write_ptr
)) {
600 iwl_print_hex_error(priv
, pkt
, 32);
604 /* If this is a huge cmd, clear the huge flag on the meta.flags
605 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
606 * the DMA buffer for the scan (huge) command.
609 cmd_index
= get_cmd_index(&txq
->q
, index
, 0);
610 txq
->meta
[cmd_index
].flags
= 0;
612 cmd_index
= get_cmd_index(&txq
->q
, index
, huge
);
613 cmd
= txq
->cmd
[cmd_index
];
614 meta
= &txq
->meta
[cmd_index
];
616 pci_unmap_single(priv
->pci_dev
,
617 dma_unmap_addr(meta
, mapping
),
618 dma_unmap_len(meta
, len
),
619 PCI_DMA_BIDIRECTIONAL
);
621 /* Input error checking is done when commands are added to queue. */
622 if (meta
->flags
& CMD_WANT_SKB
) {
623 meta
->source
->reply_page
= (unsigned long)rxb_addr(rxb
);
625 } else if (meta
->callback
)
626 meta
->callback(priv
, cmd
, pkt
);
628 iwl_hcmd_queue_reclaim(priv
, txq_id
, index
, cmd_index
);
630 if (!(meta
->flags
& CMD_ASYNC
)) {
631 clear_bit(STATUS_HCMD_ACTIVE
, &priv
->status
);
632 IWL_DEBUG_INFO(priv
, "Clearing HCMD_ACTIVE for command %s\n",
633 get_cmd_string(cmd
->hdr
.cmd
));
634 wake_up_interruptible(&priv
->wait_command_queue
);
638 EXPORT_SYMBOL(iwl_tx_cmd_complete
);
640 #ifdef CONFIG_IWLWIFI_DEBUG
641 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
642 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
644 const char *iwl_get_tx_fail_reason(u32 status
)
646 switch (status
& TX_STATUS_MSK
) {
647 case TX_STATUS_SUCCESS
:
649 TX_STATUS_POSTPONE(DELAY
);
650 TX_STATUS_POSTPONE(FEW_BYTES
);
651 TX_STATUS_POSTPONE(BT_PRIO
);
652 TX_STATUS_POSTPONE(QUIET_PERIOD
);
653 TX_STATUS_POSTPONE(CALC_TTAK
);
654 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY
);
655 TX_STATUS_FAIL(SHORT_LIMIT
);
656 TX_STATUS_FAIL(LONG_LIMIT
);
657 TX_STATUS_FAIL(FIFO_UNDERRUN
);
658 TX_STATUS_FAIL(DRAIN_FLOW
);
659 TX_STATUS_FAIL(RFKILL_FLUSH
);
660 TX_STATUS_FAIL(LIFE_EXPIRE
);
661 TX_STATUS_FAIL(DEST_PS
);
662 TX_STATUS_FAIL(HOST_ABORTED
);
663 TX_STATUS_FAIL(BT_RETRY
);
664 TX_STATUS_FAIL(STA_INVALID
);
665 TX_STATUS_FAIL(FRAG_DROPPED
);
666 TX_STATUS_FAIL(TID_DISABLE
);
667 TX_STATUS_FAIL(FIFO_FLUSHED
);
668 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL
);
669 TX_STATUS_FAIL(PASSIVE_NO_RX
);
670 TX_STATUS_FAIL(NO_BEACON_ON_RADAR
);
675 EXPORT_SYMBOL(iwl_get_tx_fail_reason
);
676 #endif /* CONFIG_IWLWIFI_DEBUG */