ath6kl: add a timeout to ath6kl_hif_intr_bh_handler()
[linux-2.6.git] / drivers / net / wireless / ath / ath6kl / hif.c
blob309be9823e02a7f7bb1f572fcd398b312147db06
/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 #include "hif.h"
18 #include "core.h"
19 #include "target.h"
20 #include "hif-ops.h"
21 #include "debug.h"
23 #define MAILBOX_FOR_BLOCK_SIZE 1
25 #define ATH6KL_TIME_QUANTUM 10 /* in ms */
27 static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
28 bool from_dma)
30 u8 *buf;
31 int i;
33 buf = req->virt_dma_buf;
35 for (i = 0; i < req->scat_entries; i++) {
37 if (from_dma)
38 memcpy(req->scat_list[i].buf, buf,
39 req->scat_list[i].len);
40 else
41 memcpy(buf, req->scat_list[i].buf,
42 req->scat_list[i].len);
44 buf += req->scat_list[i].len;
47 return 0;
50 int ath6kl_hif_rw_comp_handler(void *context, int status)
52 struct htc_packet *packet = context;
54 ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
55 packet, status);
57 packet->status = status;
58 packet->completion(packet->context, packet);
60 return 0;
63 static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
65 u32 dummy;
66 int status;
68 ath6kl_err("target debug interrupt\n");
70 ath6kl_target_failure(dev->ar);
73 * read counter to clear the interrupt, the debug error interrupt is
74 * counter 0.
76 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
77 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
78 if (status)
79 WARN_ON(1);
81 return status;
84 /* mailbox recv message polling */
85 int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
86 int timeout)
88 struct ath6kl_irq_proc_registers *rg;
89 int status = 0, i;
90 u8 htc_mbox = 1 << HTC_MAILBOX;
92 for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
93 /* this is the standard HIF way, load the reg table */
94 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
95 (u8 *) &dev->irq_proc_reg,
96 sizeof(dev->irq_proc_reg),
97 HIF_RD_SYNC_BYTE_INC);
99 if (status) {
100 ath6kl_err("failed to read reg table\n");
101 return status;
104 /* check for MBOX data and valid lookahead */
105 if (dev->irq_proc_reg.host_int_status & htc_mbox) {
106 if (dev->irq_proc_reg.rx_lkahd_valid &
107 htc_mbox) {
109 * Mailbox has a message and the look ahead
110 * is valid.
112 rg = &dev->irq_proc_reg;
113 *lk_ahd =
114 le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
115 break;
119 /* delay a little */
120 mdelay(ATH6KL_TIME_QUANTUM);
121 ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
124 if (i == 0) {
125 ath6kl_err("timeout waiting for recv message\n");
126 status = -ETIME;
127 /* check if the target asserted */
128 if (dev->irq_proc_reg.counter_int_status &
129 ATH6KL_TARGET_DEBUG_INTR_MASK)
131 * Target failure handler will be called in case of
132 * an assert.
134 ath6kl_hif_proc_dbg_intr(dev);
137 return status;
141 * Disable packet reception (used in case the host runs out of buffers)
142 * using the interrupt enable registers through the host I/F
144 int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
146 struct ath6kl_irq_enable_reg regs;
147 int status = 0;
149 ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
150 enable_rx ? "enable" : "disable");
152 /* take the lock to protect interrupt enable shadows */
153 spin_lock_bh(&dev->lock);
155 if (enable_rx)
156 dev->irq_en_reg.int_status_en |=
157 SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
158 else
159 dev->irq_en_reg.int_status_en &=
160 ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
162 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
164 spin_unlock_bh(&dev->lock);
166 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
167 &regs.int_status_en,
168 sizeof(struct ath6kl_irq_enable_reg),
169 HIF_WR_SYNC_BYTE_INC);
171 return status;
174 int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
175 struct hif_scatter_req *scat_req, bool read)
177 int status = 0;
179 if (read) {
180 scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
181 scat_req->addr = dev->ar->mbox_info.htc_addr;
182 } else {
183 scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
185 scat_req->addr =
186 (scat_req->len > HIF_MBOX_WIDTH) ?
187 dev->ar->mbox_info.htc_ext_addr :
188 dev->ar->mbox_info.htc_addr;
191 ath6kl_dbg(ATH6KL_DBG_HIF,
192 "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
193 scat_req->scat_entries, scat_req->len,
194 scat_req->addr, !read ? "async" : "sync",
195 (read) ? "rd" : "wr");
197 if (!read && scat_req->virt_scat) {
198 status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
199 if (status) {
200 scat_req->status = status;
201 scat_req->complete(dev->ar->htc_target, scat_req);
202 return 0;
206 status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
208 if (read) {
209 /* in sync mode, we can touch the scatter request */
210 scat_req->status = status;
211 if (!status && scat_req->virt_scat)
212 scat_req->status =
213 ath6kl_hif_cp_scat_dma_buf(scat_req, true);
216 return status;
219 static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
221 u8 counter_int_status;
223 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
225 counter_int_status = dev->irq_proc_reg.counter_int_status &
226 dev->irq_en_reg.cntr_int_status_en;
228 ath6kl_dbg(ATH6KL_DBG_IRQ,
229 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
230 counter_int_status);
233 * NOTE: other modules like GMBOX may use the counter interrupt for
234 * credit flow control on other counters, we only need to check for
235 * the debug assertion counter interrupt.
237 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
238 return ath6kl_hif_proc_dbg_intr(dev);
240 return 0;
243 static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
245 int status;
246 u8 error_int_status;
247 u8 reg_buf[4];
249 ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
251 error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
252 if (!error_int_status) {
253 WARN_ON(1);
254 return -EIO;
257 ath6kl_dbg(ATH6KL_DBG_IRQ,
258 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
259 error_int_status);
261 if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
262 ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
264 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
265 ath6kl_err("rx underflow\n");
267 if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
268 ath6kl_err("tx overflow\n");
270 /* Clear the interrupt */
271 dev->irq_proc_reg.error_int_status &= ~error_int_status;
273 /* set W1C value to clear the interrupt, this hits the register first */
274 reg_buf[0] = error_int_status;
275 reg_buf[1] = 0;
276 reg_buf[2] = 0;
277 reg_buf[3] = 0;
279 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
280 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
282 if (status)
283 WARN_ON(1);
285 return status;
288 static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
290 int status;
291 u8 cpu_int_status;
292 u8 reg_buf[4];
294 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
296 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
297 dev->irq_en_reg.cpu_int_status_en;
298 if (!cpu_int_status) {
299 WARN_ON(1);
300 return -EIO;
303 ath6kl_dbg(ATH6KL_DBG_IRQ,
304 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
305 cpu_int_status);
307 /* Clear the interrupt */
308 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
311 * Set up the register transfer buffer to hit the register 4 times ,
312 * this is done to make the access 4-byte aligned to mitigate issues
313 * with host bus interconnects that restrict bus transfer lengths to
314 * be a multiple of 4-bytes.
317 /* set W1C value to clear the interrupt, this hits the register first */
318 reg_buf[0] = cpu_int_status;
319 /* the remaining are set to zero which have no-effect */
320 reg_buf[1] = 0;
321 reg_buf[2] = 0;
322 reg_buf[3] = 0;
324 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
325 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
327 if (status)
328 WARN_ON(1);
330 return status;
333 /* process pending interrupts synchronously */
334 static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
336 struct ath6kl_irq_proc_registers *rg;
337 int status = 0;
338 u8 host_int_status = 0;
339 u32 lk_ahd = 0;
340 u8 htc_mbox = 1 << HTC_MAILBOX;
342 ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
345 * NOTE: HIF implementation guarantees that the context of this
346 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
347 * sleep or call any API that can block or switch thread/task
348 * contexts. This is a fully schedulable context.
352 * Process pending intr only when int_status_en is clear, it may
353 * result in unnecessary bus transaction otherwise. Target may be
354 * unresponsive at the time.
356 if (dev->irq_en_reg.int_status_en) {
358 * Read the first 28 bytes of the HTC register table. This
359 * will yield us the value of different int status
360 * registers and the lookahead registers.
362 * length = sizeof(int_status) + sizeof(cpu_int_status)
363 * + sizeof(error_int_status) +
364 * sizeof(counter_int_status) +
365 * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
366 * + sizeof(hole) + sizeof(rx_lkahd) +
367 * sizeof(int_status_en) +
368 * sizeof(cpu_int_status_en) +
369 * sizeof(err_int_status_en) +
370 * sizeof(cntr_int_status_en);
372 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
373 (u8 *) &dev->irq_proc_reg,
374 sizeof(dev->irq_proc_reg),
375 HIF_RD_SYNC_BYTE_INC);
376 if (status)
377 goto out;
379 if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
380 ath6kl_dump_registers(dev, &dev->irq_proc_reg,
381 &dev->irq_en_reg);
383 /* Update only those registers that are enabled */
384 host_int_status = dev->irq_proc_reg.host_int_status &
385 dev->irq_en_reg.int_status_en;
387 /* Look at mbox status */
388 if (host_int_status & htc_mbox) {
390 * Mask out pending mbox value, we use "lookAhead as
391 * the real flag for mbox processing.
393 host_int_status &= ~htc_mbox;
394 if (dev->irq_proc_reg.rx_lkahd_valid &
395 htc_mbox) {
396 rg = &dev->irq_proc_reg;
397 lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
398 if (!lk_ahd)
399 ath6kl_err("lookAhead is zero!\n");
404 if (!host_int_status && !lk_ahd) {
405 *done = true;
406 goto out;
409 if (lk_ahd) {
410 int fetched = 0;
412 ath6kl_dbg(ATH6KL_DBG_IRQ,
413 "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
415 * Mailbox Interrupt, the HTC layer may issue async
416 * requests to empty the mailbox. When emptying the recv
417 * mailbox we use the async handler above called from the
418 * completion routine of the callers read request. This can
419 * improve performance by reducing context switching when
420 * we rapidly pull packets.
422 status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
423 lk_ahd, &fetched);
424 if (status)
425 goto out;
427 if (!fetched)
429 * HTC could not pull any messages out due to lack
430 * of resources.
432 dev->htc_cnxt->chk_irq_status_cnt = 0;
435 /* now handle the rest of them */
436 ath6kl_dbg(ATH6KL_DBG_IRQ,
437 "valid interrupt source(s) for other interrupts: 0x%x\n",
438 host_int_status);
440 if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
441 /* CPU Interrupt */
442 status = ath6kl_hif_proc_cpu_intr(dev);
443 if (status)
444 goto out;
447 if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
448 /* Error Interrupt */
449 status = ath6kl_hif_proc_err_intr(dev);
450 if (status)
451 goto out;
454 if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
455 /* Counter Interrupt */
456 status = ath6kl_hif_proc_counter_intr(dev);
458 out:
460 * An optimization to bypass reading the IRQ status registers
461 * unecessarily which can re-wake the target, if upper layers
462 * determine that we are in a low-throughput mode, we can rely on
463 * taking another interrupt rather than re-checking the status
464 * registers which can re-wake the target.
466 * NOTE : for host interfaces that makes use of detecting pending
467 * mbox messages at hif can not use this optimization due to
468 * possible side effects, SPI requires the host to drain all
469 * messages from the mailbox before exiting the ISR routine.
472 ath6kl_dbg(ATH6KL_DBG_IRQ,
473 "bypassing irq status re-check, forcing done\n");
475 if (!dev->htc_cnxt->chk_irq_status_cnt)
476 *done = true;
478 ath6kl_dbg(ATH6KL_DBG_IRQ,
479 "proc_pending_irqs: (done:%d, status=%d\n", *done, status);
481 return status;
484 /* interrupt handler, kicks off all interrupt processing */
485 int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
487 struct ath6kl_device *dev = ar->htc_target->dev;
488 unsigned long timeout;
489 int status = 0;
490 bool done = false;
493 * Reset counter used to flag a re-scan of IRQ status registers on
494 * the target.
496 dev->htc_cnxt->chk_irq_status_cnt = 0;
499 * IRQ processing is synchronous, interrupt status registers can be
500 * re-read.
502 timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
503 while (time_before(jiffies, timeout) && !done) {
504 status = proc_pending_irqs(dev, &done);
505 if (status)
506 break;
509 return status;
512 static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
514 struct ath6kl_irq_enable_reg regs;
515 int status;
517 spin_lock_bh(&dev->lock);
519 /* Enable all but ATH6KL CPU interrupts */
520 dev->irq_en_reg.int_status_en =
521 SM(INT_STATUS_ENABLE_ERROR, 0x01) |
522 SM(INT_STATUS_ENABLE_CPU, 0x01) |
523 SM(INT_STATUS_ENABLE_COUNTER, 0x01);
526 * NOTE: There are some cases where HIF can do detection of
527 * pending mbox messages which is disabled now.
529 dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
531 /* Set up the CPU Interrupt status Register */
532 dev->irq_en_reg.cpu_int_status_en = 0;
534 /* Set up the Error Interrupt status Register */
535 dev->irq_en_reg.err_int_status_en =
536 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
537 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
540 * Enable Counter interrupt status register to get fatal errors for
541 * debugging.
543 dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
544 ATH6KL_TARGET_DEBUG_INTR_MASK);
545 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
547 spin_unlock_bh(&dev->lock);
549 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
550 &regs.int_status_en, sizeof(regs),
551 HIF_WR_SYNC_BYTE_INC);
553 if (status)
554 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
555 status);
557 return status;
560 int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
562 struct ath6kl_irq_enable_reg regs;
564 spin_lock_bh(&dev->lock);
565 /* Disable all interrupts */
566 dev->irq_en_reg.int_status_en = 0;
567 dev->irq_en_reg.cpu_int_status_en = 0;
568 dev->irq_en_reg.err_int_status_en = 0;
569 dev->irq_en_reg.cntr_int_status_en = 0;
570 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
571 spin_unlock_bh(&dev->lock);
573 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
574 &regs.int_status_en, sizeof(regs),
575 HIF_WR_SYNC_BYTE_INC);
578 /* enable device interrupts */
579 int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
581 int status = 0;
584 * Make sure interrupt are disabled before unmasking at the HIF
585 * layer. The rationale here is that between device insertion
586 * (where we clear the interrupts the first time) and when HTC
587 * is finally ready to handle interrupts, other software can perform
588 * target "soft" resets. The ATH6KL interrupt enables reset back to an
589 * "enabled" state when this happens.
591 ath6kl_hif_disable_intrs(dev);
593 /* unmask the host controller interrupts */
594 ath6kl_hif_irq_enable(dev->ar);
595 status = ath6kl_hif_enable_intrs(dev);
597 return status;
600 /* disable all device interrupts */
601 int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
604 * Mask the interrupt at the HIF layer to avoid any stray interrupt
605 * taken while we zero out our shadow registers in
606 * ath6kl_hif_disable_intrs().
608 ath6kl_hif_irq_disable(dev->ar);
610 return ath6kl_hif_disable_intrs(dev);
613 int ath6kl_hif_setup(struct ath6kl_device *dev)
615 int status = 0;
617 spin_lock_init(&dev->lock);
620 * NOTE: we actually get the block size of a mailbox other than 0,
621 * for SDIO the block size on mailbox 0 is artificially set to 1.
622 * So we use the block size that is set for the other 3 mailboxes.
624 dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
626 /* must be a power of 2 */
627 if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
628 WARN_ON(1);
629 status = -EINVAL;
630 goto fail_setup;
633 /* assemble mask, used for padding to a block */
634 dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
636 ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
637 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
639 status = ath6kl_hif_disable_intrs(dev);
641 fail_setup:
642 return status;