/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
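
/*
 * Added illustration, not part of the original driver: the value written
 * to HBUS_TARG_WRPTR packs the TFD write index into bits 0..7 and the
 * queue id into bits 8..15. A hypothetical helper making the encoding
 * explicit:
 */
static inline u32 iwl_txq_wrptr_encode(int txq_id, u32 write_ptr)
{
	/* e.g. queue 4 at write index 12 -> (12 | (4 << 8)) = 0x40c */
	return write_ptr | (txq_id << 8);
}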

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
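
/*
 * Worked example (added commentary): for a 36-bit DMA address
 * 0x8_1234_5678 and len 64, iwl_tfd_set_tb() stores lo = 0x12345678 and
 * hi_n_len = (64 << 4) | 0x8 = 0x408; iwl_tfd_tb_get_len() then recovers
 * 0x408 >> 4 = 64, and iwl_tfd_tb_get_addr() rebuilds the full address by
 * shifting the low nibble of hi_n_len up by 32 bits.
 */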

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, int dma_dir)
{
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;
	int index = txq->q.read_ptr;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
			 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
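
/*
 * Note (added commentary): callers pass reset = 1 when attaching the
 * first buffer of a TFD, which zeroes the descriptor before use;
 * additional buffers for the same TFD are attached with reset = 0, as
 * iwl_enqueue_hcmd() below does for NOCOPY fragments.
 */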

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * The device supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		iwlagn_txq_free_tfd(priv, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = get_cmd_index(q, q->read_ptr);

		if (txq->meta[i].flags & CMD_MAPPED) {
			iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
					 PCI_DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_cmd_queue_unmap(priv);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;

	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
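
/*
 * Worked example (added commentary): with n_bd = n_window = 256,
 * read_ptr = 10 and write_ptr = 250, s starts at 10 - 250 = -240,
 * gains n_window -> 16, and loses the 2-entry reserve -> 14 free slots.
 */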

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
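
/*
 * Example (added commentary): a command queue with slots_num = 32 gets
 * low_mark = 32 / 4 = 8 and high_mark = 32 / 8 = 4; the clamps in
 * iwl_queue_init() only take effect for windows smaller than 16 entries.
 */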

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX,
			     slots_num, txq_id);
	if (ret)
		return ret;

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);
}
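
/*
 * Note (added commentary): unlike iwl_tx_queue_init(), this path re-uses
 * the already-allocated meta/cmd arrays and TFD ring, so it is suitable
 * for re-initializing queues across a firmware restart without
 * reallocating DMA memory.
 */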

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device's private data
 * @cmd: pointer to the ucode command structure
 *
 * Returns a value < 0 to indicate that the operation failed. On success,
 * it returns the index (> 0) of the command in the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   copy_size, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
					   cmd->len[i], PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 PCI_DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags | CMD_MAPPED;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
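
/*
 * Usage sketch (added illustration, not from the original file): a
 * minimal host command with no payload, so only the header is copied
 * and mapped; REPLY_ECHO is assumed here purely as an example id.
 */
static int __maybe_unused iwl_enqueue_echo_example(struct iwl_priv *priv)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_ECHO,
		.flags = CMD_ASYNC,
	};

	return iwl_enqueue_hcmd(priv, &cmd);
}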

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}
	}
}
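
/*
 * Note (added commentary): host commands complete in order, so the
 * reclaim loop above should advance read_ptr by exactly one entry per
 * completed command; reclaiming more than one in a single pass means a
 * response was lost, which is escalated as a firmware error.
 */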

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, priv->cmd_queue, sequence,
		 priv->txq[priv->cmd_queue].q.read_ptr,
		 priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}