/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include "wl12xx_80211.h"
#define WL1271_BOOT_RETRIES 3
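/*
 * Optional module parameters: fwlog_param selects the firmware logger mode
 * (see wlcore_adjust_conf()), bug_on_recovery turns a hardware recovery
 * into a BUG(), and no_recovery leaves a stuck firmware untouched for
 * debugging.
 */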
static char *fwlog_param;
static bool bug_on_recovery;
static bool no_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wl1271_op_stop(struct ieee80211_hw *hw);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
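/*
 * Tell the firmware that the associated station has been authorized: send
 * the peer-state update for the station link and release the
 * remain-on-channel used during connection setup.
 */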
static int wl12xx_set_authorized(struct wl1271 *wl,
				 struct wl12xx_vif *wlvif)
{
	int ret;

	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
		return -EINVAL;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return 0;

	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
		return 0;

	ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
	if (ret < 0)
		return ret;

	wl12xx_croc(wl, wlvif->role_id);

	wl1271_info("Association completed.");
	return 0;
}
static int wl1271_reg_notify(struct wiphy *wiphy,
			     struct regulatory_request *request)
{
	struct ieee80211_supported_band *band;
	struct ieee80211_channel *ch;
	int i;

	band = wiphy->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++) {
		ch = &band->channels[i];
		if (ch->flags & IEEE80211_CHAN_DISABLED)
			continue;

		if (ch->flags & IEEE80211_CHAN_RADAR)
			ch->flags |= IEEE80211_CHAN_NO_IBSS |
				     IEEE80211_CHAN_PASSIVE_SCAN;
	}

	return 0;
}
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool enable)
{
	int ret;

	/* we should hold wl->mutex */
	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
	if (ret < 0)
		goto out;

	if (enable)
		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
	else
		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
	return ret;
}
/*
 * this function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
	else {
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wlvif->rx_streaming_timer);
	}
out:
	return ret;
}
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_disable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, false);
	if (ret)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
static void wl1271_rx_streaming_timer(unsigned long data)
{
	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
	struct wl1271 *wl = wlvif->wl;
	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
/* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
	/* if the watchdog is not armed, don't do anything */
	if (wl->tx_allocated_blocks == 0)
		return;

	cancel_delayed_work(&wl->tx_watchdog_work);
	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
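/*
 * The TX watchdog fires when no TX blocks have been freed by the firmware
 * for tx_watchdog_timeout ms. A ROC, an active scan or stations sleeping
 * on an AP link can legitimately stall TX, so those cases only re-arm the
 * timer; otherwise the firmware is assumed to be stuck and recovery is
 * queued.
 */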
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
static void wlcore_adjust_conf(struct wl1271 *wl)
{
	/* Adjust settings according to optional module parameters */
	if (fwlog_param) {
		if (!strcmp(fwlog_param, "continuous")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
		} else if (!strcmp(fwlog_param, "ondemand")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
		} else if (!strcmp(fwlog_param, "dbgpins")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
		} else if (!strcmp(fwlog_param, "disable")) {
			wl->conf.fwlog.mem_blocks = 0;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
		} else {
			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
		}
	}
}
static int wl1271_plt_init(struct wl1271 *wl)
{
	int ret;

	ret = wl->ops->hw_init(wl);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_init_mem_config(wl);
	if (ret < 0)
		return ret;

	ret = wl12xx_acx_mem_cfg(wl);
	if (ret < 0)
		goto out_free_memmap;

	/* Enable data path */
	ret = wl1271_cmd_data_path(wl, 1);
	if (ret < 0)
		goto out_free_memmap;

	/* Configure for CAM power saving (ie. always active) */
	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
	if (ret < 0)
		goto out_free_memmap;

	/* configure PM */
	ret = wl1271_acx_pm_config(wl);
	if (ret < 0)
		goto out_free_memmap;

	return 0;

out_free_memmap:
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	return ret;
}
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps, single_sta;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	single_sta = (wl->active_sta_count == 1);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	struct wl1271_link *lnk;
	u32 cur_fw_ps_map;
	u8 hlid, cnt;

	/* TODO: also use link_fast_bitmap here */

	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
		lnk = &wl->links[hlid];
		cnt = status->counters.tx_lnk_free_pkts[hlid] -
			lnk->prev_freed_pkts;

		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
		lnk->allocated_pkts -= cnt;

		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    lnk->allocated_pkts);
	}
}
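/*
 * Read the firmware status area and update the driver's TX block/packet
 * accounting, the per-link PS state (for AP roles) and the host-chipset
 * time offset.
 */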
static void wl12xx_fw_status(struct wl1271 *wl,
			     struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	size_t status_len;

	status_len = sizeof(*status) + wl->fw_status_priv_len;

	wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
			     status_len, false);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status->total_released_blks)))
		freed_blocks = le32_to_cpu(status->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status->fw_localtime);
}
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
	struct sk_buff *skb;

	/* Pass all received frames to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
		ieee80211_rx_ni(wl->hw, skb);

	/* Return sent skbs to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
		ieee80211_tx_status_ni(wl->hw, skb);
}
static void wl1271_netstack_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, netstack_work);

	do {
		wl1271_flush_deferred_work(wl);
	} while (skb_queue_len(&wl->deferred_rx_queue));
}

#define WL1271_IRQ_MAX_LOOPS 256
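/*
 * Threaded IRQ handler: process firmware interrupts in a bounded loop,
 * handling RX data, TX completions and firmware events, and queue TX work
 * afterwards if packets are still pending.
 */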
static irqreturn_t wl1271_irq(int irq, void *cookie)
{
	int ret;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		wl12xx_fw_status(wl, wl->fw_status);

		wlcore_hw_tx_immediate_compl(wl);

		intr = le32_to_cpu(wl->fw_status->intr);
		intr &= WL1271_INTR_MASK;
		if (!intr) {
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
			wl12xx_queue_recovery_work(wl);

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			wl12xx_rx(wl, wl->fw_status);

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				wl1271_tx_work_locked(wl);
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			wlcore_hw_tx_delayed_compl(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			wl1271_event_handle(wl, 0);
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			wl1271_event_handle(wl, 1);
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
struct vif_counter_data {
	u8 counter;

	struct ieee80211_vif *cur_vif;
	bool cur_vif_running;
};

static void wl12xx_vif_count_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct vif_counter_data *counter = data;

	counter->counter++;
	if (counter->cur_vif == vif)
		counter->cur_vif_running = true;
}

/* caller must not hold wl->mutex, as it might deadlock */
static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
				 struct ieee80211_vif *cur_vif,
				 struct vif_counter_data *data)
{
	memset(data, 0, sizeof(*data));
	data->cur_vif = cur_vif;

	ieee80211_iterate_active_interfaces(hw,
					    wl12xx_vif_count_iter, data);
}
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);
	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);
	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
	if (ret < 0) {
		wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
			     ret);
		return ret;
	}

	wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!wl->nvs) {
		wl1271_error("could not allocate memory for the nvs file");
		ret = -ENOMEM;
		goto out;
	}

	wl->nvs_len = fw->size;

out:
	release_firmware(fw);

	return ret;
}
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t len = 0;

	/* The FW log is a length-value list, find where the log ends */
	while (len < maxlen) {
		if (memblock[len] == 0)
			break;

		if (len + memblock[len] + 1 > maxlen)
			break;

		len += memblock[len] + 1;
	}

	/* Make sure we have enough room */
	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}
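/*
 * On a firmware panic, retrieve the on-demand firmware log by walking the
 * linked list of log memory blocks in chip memory and appending their
 * contents to the host-side fwlog buffer exposed through sysfs.
 */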
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 first_addr;
	u8 *block;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * This might fail if the firmware hanged.
	 */
	if (!wl1271_ps_elp_wakeup(wl))
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	wl12xx_fw_status(wl, wl->fw_status);
	first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
	if (!first_addr)
		goto out;

	/* Traverse the memory blocks linked list */
	addr = first_addr;
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
				   false);

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one.
		 */
		addr = le32_to_cpup((__le32 *)block);
		if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
				       WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
			break;
	} while (addr && (addr != first_addr));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state != WL1271_STATE_ON || wl->plt)
		goto out_unlock;

	/* Avoid a recursive recovery */
	set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	wl12xx_read_fwlog_panic(wl);

	/* change partitions momentarily so we can read the FW pc */
	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
		    wl->chip.fw_ver_str,
		    wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);

	BUG_ON(bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		goto out_unlock;
	}

	BUG_ON(bug_on_recovery);

	/*
	 * Advance security sequence number to overcome potential progress
	 * in the firmware during recovery. This doesn't hurt if the network is
	 * not encrypted.
	 */
	wl12xx_for_each_wlvif(wl, wlvif) {
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			wlvif->tx_security_seq +=
				WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}

	/* Prevent spurious TX during FW restart */
	ieee80211_stop_queues(wl->hw);

	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
					 struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}
	mutex_unlock(&wl->mutex);
	wl1271_op_stop(wl->hw);

	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	ieee80211_restart_hw(wl->hw);

	/*
	 * It's safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	ieee80211_wake_queues(wl->hw);

	return;
out_unlock:
	mutex_unlock(&wl->mutex);
}
static void wl1271_fw_wakeup(struct wl1271 *wl)
{
	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
static int wl1271_setup(struct wl1271 *wl)
{
	wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
	if (!wl->fw_status)
		return -ENOMEM;

	wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
	if (!wl->tx_res_if) {
		kfree(wl->fw_status);
		return -ENOMEM;
	}

	return 0;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);

	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);

	/* ELP module wake up */
	wl1271_fw_wakeup(wl);

out:
	return ret;
}
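/*
 * Power the chip on and prepare it for booting: set the bus block size,
 * identify the chip, allocate the status buffers, fetch the matching
 * firmware image and, if no NVS was provided yet, fetch the NVS file too.
 */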
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 */
	if (wl1271_set_block_size(wl))
		wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

	/* No NVS from netlink, try to get it from the filesystem */
	if (wl->nvs == NULL) {
		ret = wl1271_fetch_nvs(wl);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
*wl
)
1024 int retries
= WL1271_BOOT_RETRIES
;
1025 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1028 mutex_lock(&wl
->mutex
);
1030 wl1271_notice("power up");
1032 if (wl
->state
!= WL1271_STATE_OFF
) {
1033 wl1271_error("cannot go into PLT state because not "
1034 "in off state: %d", wl
->state
);
1041 ret
= wl12xx_chip_wakeup(wl
, true);
1045 ret
= wl
->ops
->boot(wl
);
1049 ret
= wl1271_plt_init(wl
);
1054 wl
->state
= WL1271_STATE_ON
;
1055 wl1271_notice("firmware booted in PLT mode (%s)",
1056 wl
->chip
.fw_ver_str
);
1058 /* update hw/fw version info in wiphy struct */
1059 wiphy
->hw_version
= wl
->chip
.id
;
1060 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1061 sizeof(wiphy
->fw_version
));
1066 mutex_unlock(&wl
->mutex
);
1067 /* Unlocking the mutex in the middle of handling is
1068 inherently unsafe. In this case we deem it safe to do,
1069 because we need to let any possibly pending IRQ out of
1070 the system (and while we are WL1271_STATE_OFF the IRQ
1071 work function will not do anything.) Also, any other
1072 possible concurrent operations will fail due to the
1073 current state, hence the wl1271 struct should be safe. */
1074 wlcore_disable_interrupts(wl
);
1075 wl1271_flush_deferred_work(wl
);
1076 cancel_work_sync(&wl
->netstack_work
);
1077 mutex_lock(&wl
->mutex
);
1079 wl1271_power_off(wl
);
1082 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1083 WL1271_BOOT_RETRIES
);
1085 mutex_unlock(&wl
->mutex
);
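/*
 * Leave PLT mode: disable interrupts, flush and cancel pending work, and
 * power the chip off again.
 */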
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);
	cancel_delayed_work_sync(&wl->connection_loss_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->state = WL1271_STATE_OFF;
	wl->plt = false;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	if (vif)
		wlvif = wl12xx_vif_to_data(vif);

	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue the packet */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (wlvif && !test_bit(hlid, wlvif->links_map))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	wl->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need to stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		ieee80211_stop_queue(wl->hw, mapping);
		set_bit(q, &wl->stopped_queues_map);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		wl1271_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512 bytes
 * boundaries could be beneficial, performance wise
 */
#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
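/*
 * Build the dummy packet: a padded NULL-function data frame that is queued
 * when the firmware runs low on RX memory blocks and requests one.
 */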
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
{
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr *hdr;
	unsigned int dummy_packet_size;

	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);

	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
	if (!skb) {
		wl1271_warning("Failed to allocate a dummy packet skb");
		return NULL;
	}

	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));

	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_STYPE_NULLFUNC |
					 IEEE80211_FCTL_TODS);

	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);

	/* Dummy packets require the TID to be management */
	skb->priority = WL1271_TID_MGMT;

	/* Initialize all fields that might be used */
	skb_set_queue_mapping(skb, 0);
	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));

	return skb;
}
static int
wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
*wl1271_rx_filter_alloc(void)
1332 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1335 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1342 for (i
= 0; i
< filter
->num_fields
; i
++)
1343 kfree(filter
->fields
[i
].pattern
);
1348 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1349 u16 offset
, u8 flags
,
1350 u8
*pattern
, u8 len
)
1352 struct wl12xx_rx_filter_field
*field
;
1354 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1355 wl1271_warning("Max fields per RX filter. can't alloc another");
1359 field
= &filter
->fields
[filter
->num_fields
];
1361 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1362 if (!field
->pattern
) {
1363 wl1271_warning("Failed to allocate RX filter pattern");
1367 filter
->num_fields
++;
1369 field
->offset
= cpu_to_le16(offset
);
1370 field
->flags
= flags
;
1372 memcpy(field
->pattern
, pattern
, len
);
1377 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1379 int i
, fields_size
= 0;
1381 for (i
= 0; i
< filter
->num_fields
; i
++)
1382 fields_size
+= filter
->fields
[i
].len
+
1383 sizeof(struct wl12xx_rx_filter_field
) -
1389 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1393 struct wl12xx_rx_filter_field
*field
;
1395 for (i
= 0; i
< filter
->num_fields
; i
++) {
1396 field
= (struct wl12xx_rx_filter_field
*)buf
;
1398 field
->offset
= filter
->fields
[i
].offset
;
1399 field
->flags
= filter
->fields
[i
].flags
;
1400 field
->len
= filter
->fields
[i
].len
;
1402 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1403 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1404 sizeof(u8
*) + field
->len
;
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 */
static int wl1271_convert_wowlan_pattern_to_rx_filter(
	struct cfg80211_wowlan_trig_pkt_pattern *p,
	struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset, flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
		wl1271_rx_filter_clear_all(wl);
		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	wl1271_rx_filter_clear_all(wl);

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_wowlan_trig_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
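/*
 * Suspend path for a station interface: program the WoWLAN RX filters and
 * switch the firmware to the suspend wake-up conditions and listen
 * interval from the configuration.
 */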
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_configure_wowlan(wl, wow);
	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
*wl
,
1578 struct wl12xx_vif
*wlvif
,
1579 struct cfg80211_wowlan
*wow
)
1581 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1582 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1583 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1584 return wl1271_configure_suspend_ap(wl
, wlvif
);
static void wl1271_configure_resume(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		wl1271_configure_wowlan(wl, NULL);

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);
	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

	wl1271_ps_elp_sleep(wl);
}
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	return 0;
}
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");
		wl1271_irq(0, wl);
		wlcore_enable_interrupts(wl);
	}

	mutex_lock(&wl->mutex);
	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}
	wl->wow_enabled = false;
	mutex_unlock(&wl->mutex);

	return 0;
}
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */

	return 0;
}
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	int i;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (wl->state == WL1271_STATE_OFF) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WL1271_STATE_OFF;
	mutex_unlock(&wl->mutex);

	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);
	cancel_delayed_work_sync(&wl->connection_loss_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	wl12xx_tx_reset(wl, true);
	mutex_lock(&wl->mutex);

	wl1271_power_off(wl);

	wl->band = IEEE80211_BAND_2GHZ;

	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->sched_scanning = false;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	wl->active_sta_count = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	mutex_unlock(&wl->mutex);
}
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 policy = find_first_zero_bit(wl->rate_policies_map,
					WL12XX_MAX_RATE_POLICIES);
	if (policy >= WL12XX_MAX_RATE_POLICIES)
		return -EBUSY;

	__set_bit(policy, wl->rate_policies_map);
	*idx = policy;
	return 0;
}

static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(*idx, wl->rate_policies_map);
	*idx = WL12XX_MAX_RATE_POLICIES;
}
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	switch (wlvif->bss_type) {
	case BSS_TYPE_AP_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_GO;
		else
			return WL1271_ROLE_AP;

	case BSS_TYPE_STA_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_CL;
		else
			return WL1271_ROLE_STA;

	case BSS_TYPE_IBSS:
		return WL1271_ROLE_IBSS;

	default:
		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
	}
	return WL12XX_INVALID_ROLE_TYPE;
}
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
	wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
	wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
static bool wl12xx_init_fw(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	bool booted = false;
	struct wiphy *wiphy = wl->hw->wiphy;
	int ret;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, false);
		if (ret < 0)
			goto power_off;

		ret = wl->ops->boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_hw_init(wl);
		if (ret < 0)
			goto irq_disable;

		booted = true;
		break;

irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WL1271_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wlcore_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
	}

	if (!booted) {
		wl1271_error("firmware boot failed despite %d retries",
			     WL1271_BOOT_RETRIES);
		goto out;
	}

	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

	wl->state = WL1271_STATE_ON;
out:
	return booted;
}
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
{
	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
}
/*
 * Check whether a fw switch (i.e. moving from one loaded
 * fw to another) is needed. This function is also responsible
 * for updating wl->last_vif_count, so it must be called before
 * loading a non-plt fw (so the correct fw (single-role/multi-role)
 * will be used for the next fw boot).
 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
				  bool add)
{
	enum wl12xx_fw_type current_fw = wl->fw_type;
	u8 vif_count = vif_counter_data.counter;

	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
		return false;

	/* increase the vif count if this is a new vif */
	if (add && !vif_counter_data.cur_vif_running)
		vif_count++;

	wl->last_vif_count = vif_count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WL1271_STATE_OFF)
		return false;

	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
		return true;
	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
		return true;

	return false;
}

/*
 * Enter "forced psm". Make sure the sta is in psm against the ap,
 * to make the fw switch a bit more disconnection-persistent.
 */
static void wl12xx_force_active_psm(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
	}
}
*hw
,
2062 struct ieee80211_vif
*vif
)
2064 struct wl1271
*wl
= hw
->priv
;
2065 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2066 struct vif_counter_data vif_count
;
2069 bool booted
= false;
2071 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2072 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2074 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2075 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2077 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2079 mutex_lock(&wl
->mutex
);
2080 ret
= wl1271_ps_elp_wakeup(wl
);
2085 * in some very corner case HW recovery scenarios its possible to
2086 * get here before __wl1271_op_remove_interface is complete, so
2087 * opt out if that is the case.
2089 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2090 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2096 ret
= wl12xx_init_vif_data(wl
, vif
);
2101 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2102 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2107 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2108 wl12xx_force_active_psm(wl
);
2109 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2110 mutex_unlock(&wl
->mutex
);
2111 wl1271_recovery_work(&wl
->recovery_work
);
2116 * TODO: after the nvs issue will be solved, move this block
2117 * to start(), and make sure here the driver is ON.
2119 if (wl
->state
== WL1271_STATE_OFF
) {
2121 * we still need this in order to configure the fw
2122 * while uploading the nvs
2124 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2126 booted
= wl12xx_init_fw(wl
);
2133 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2134 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2136 * The device role is a special role used for
2137 * rx and tx frames prior to association (as
2138 * the STA role can get packets only from
2139 * its associated bssid)
2141 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2143 &wlvif
->dev_role_id
);
2148 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2149 role_type
, &wlvif
->role_id
);
2153 ret
= wl1271_init_vif_specific(wl
, vif
);
2157 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2158 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2160 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2165 wl1271_ps_elp_sleep(wl
);
2167 mutex_unlock(&wl
->mutex
);
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state != WL1271_STATE_ON)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_vif == vif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_vif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);

			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
			if (ret < 0)
				goto deinit;
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	wl12xx_tx_reset_wlvif(wl, wlvif);
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count--;
	else
		wl->sta_count--;

	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);

	mutex_lock(&wl->mutex);
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;
	bool cancel_recovery = true;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
		cancel_recovery = false;
	}
out:
	mutex_unlock(&wl->mutex);
	if (cancel_recovery)
		cancel_work_sync(&wl->recovery_work);
}
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		       bool set_assoc)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that it clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (set_assoc)
		set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else
		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	if (ret < 0)
		goto out;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	/*
	 * The join command disables the keep-alive mode, shuts down its
	 * process, and also clears the template config, so we need to reset
	 * it all after the join. The acx_aid starts the keep-alive process,
	 * and the order of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
	if (ret < 0)
		goto out;

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   CMD_TEMPL_KLV_IDX_NULL_DATA,
					   ACX_KEEP_ALIVE_TPL_VALID);

out:
	return ret;
}
static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		wl12xx_cmd_stop_channel_switch(wl);
		ieee80211_chswitch_done(vif, false);
	}

	/* to stop listening to a channel, we disconnect */
	ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
	if (ret < 0)
		goto out;

	/* reset TX security counters on a clean disconnect */
	wlvif->tx_security_last_seq_lsb = 0;
	wlvif->tx_security_seq = 0;

out:
	return ret;
}
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
	wlvif->rate_set = wlvif->basic_rate_set;
}
static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  bool idle)
{
	int ret;
	bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	if (idle == cur_idle)
		return 0;

	if (idle) {
		/* no need to croc if we weren't busy (e.g. during boot) */
		if (wl12xx_dev_role_started(wlvif)) {
			ret = wl12xx_stop_dev(wl, wlvif);
			if (ret < 0)
				goto out;
		}
		wlvif->rate_set =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		if (ret < 0)
			goto out;
		ret = wl1271_acx_keep_alive_config(
			wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
			ACX_KEEP_ALIVE_TPL_INVALID);
		if (ret < 0)
			goto out;
		clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
	} else {
		/* The current firmware only supports sched_scan in idle */
		if (wl->sched_scanning) {
			wl1271_scan_sched_scan_stop(wl);
			ieee80211_sched_scan_stopped(wl->hw);
		}

		ret = wl12xx_start_dev(wl, wlvif);
		if (ret < 0)
			goto out;
		set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
	}

out:
	return ret;
}
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			     struct ieee80211_conf *conf, u32 changed)
{
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);

	/* if the channel changes while joined, join again */
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
	    ((wlvif->band != conf->channel->band) ||
	     (wlvif->channel != channel) ||
	     (wlvif->channel_type != conf->channel_type))) {
		/* send all pending packets */
		wl1271_tx_work_locked(wl);
		wlvif->band = conf->channel->band;
		wlvif->channel = channel;
		wlvif->channel_type = conf->channel_type;

		if (is_ap) {
			ret = wl1271_init_ap_rates(wl, wlvif);
			if (ret < 0)
				wl1271_error("AP rate policy change failed %d",
					     ret);
		} else {
			/*
			 * FIXME: the mac80211 should really provide a fixed
			 * rate to use here. for now, just use the smallest
			 * possible rate for the band as a fixed rate for
			 * association frames and other control messages.
			 */
			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
				wl1271_set_band_rate(wl, wlvif);

			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				wl1271_warning("rate policy for channel "
					       "failed %d", ret);

			/*
			 * change the ROC channel. do it only if we are
			 * not idle. otherwise, CROC will be called
			 * anyway.
			 */
			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
				      &wlvif->flags) &&
			    wl12xx_dev_role_started(wlvif) &&
			    !(conf->flags & IEEE80211_CONF_IDLE)) {
				ret = wl12xx_stop_dev(wl, wlvif);
				if (ret < 0)
					return ret;

				ret = wl12xx_start_dev(wl, wlvif);
				if (ret < 0)
					return ret;
			}
		}
	}

	if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {

		if ((conf->flags & IEEE80211_CONF_PS) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {

			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!(conf->flags & IEEE80211_CONF_PS) &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {

			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	if (conf->power_level != wlvif->power_level) {
		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
		if (ret < 0)
			return ret;

		wlvif->power_level = conf->power_level;
	}

	return ret;
}
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
		     " changed 0x%x",
		     channel,
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	/*
	 * mac80211 will go to idle nearly immediately after transmitting some
	 * frames, such as the deauth. To make sure those frames reach the air,
	 * wait here until the TX queue is fully flushed.
	 */
	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
	    (conf->flags & IEEE80211_CONF_IDLE))
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	/* we support configuring the channel and band even while off */
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		wl->band = conf->channel->band;
		wl->channel = channel;
		wl->channel_type = conf->channel_type;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
struct wl1271_filter_params {
	bool enabled;
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};

static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
				       struct netdev_hw_addr_list *mc_list)
{
	struct wl1271_filter_params *fp;
	struct netdev_hw_addr *ha;
	struct wl1271 *wl = hw->priv;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return 0;

	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
	if (!fp) {
		wl1271_error("Out of memory setting filters.");
		return 0;
	}

	/* update multicast filtering parameters */
	fp->mc_list_length = 0;
	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
		fp->enabled = false;
	} else {
		fp->enabled = true;
		netdev_hw_addr_list_for_each(ha, mc_list) {
			memcpy(fp->mc_list[fp->mc_list_length],
					ha->addr, ETH_ALEN);
			fp->mc_list_length++;
		}
	}

	return (u64)(unsigned long)fp;
}
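
/*
 * Added note: mac80211 treats the value returned by prepare_multicast() as an
 * opaque 64-bit cookie. Here that cookie simply carries the pointer to the
 * wl1271_filter_params allocated above (GFP_ATOMIC, since this runs in atomic
 * context), and wl1271_op_configure_filter() below casts it back with
 * (void *)(unsigned long)multicast before reading the multicast list.
 */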
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)

static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 id, u8 key_type, u8 key_size,
				const u8 *key, u8 hlid, u32 tx_seq_32,
				u16 tx_seq_16)
{
	struct wl1271_ap_key *ap_key;
	int i;

	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);

	if (key_size > MAX_KEY_SIZE)
		return -EINVAL;

	/*
	 * Find next free entry in ap_keys. Also check we are not replacing
	 * an existing key.
	 */
	for (i = 0; i < MAX_NUM_KEYS; i++) {
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		if (wlvif->ap.recorded_keys[i]->id == id) {
			wl1271_warning("trying to record key replacement");
			return -EINVAL;
		}
	}

	if (i == MAX_NUM_KEYS)
		return -EBUSY;

	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
	if (!ap_key)
		return -ENOMEM;

	ap_key->id = id;
	ap_key->key_type = key_type;
	ap_key->key_size = key_size;
	memcpy(ap_key->key, key, key_size);
	ap_key->hlid = hlid;
	ap_key->tx_seq_32 = tx_seq_32;
	ap_key->tx_seq_16 = tx_seq_16;

	wlvif->ap.recorded_keys[i] = ap_key;
	return 0;
}
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		kfree(wlvif->ap.recorded_keys[i]);
		wlvif->ap.recorded_keys[i] = NULL;
	}
}
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	/*
	 * A role set to GEM cipher requires different Tx settings (namely
	 * spare blocks). Note when we are in this mode so the HW can adjust.
	 */
	if (key_type == KEY_GEM) {
		if (action == KEY_ADD_OR_REPLACE)
			wlvif->is_gem = true;
		else if (action == KEY_REMOVE)
			wlvif->is_gem = false;
	}

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We don't support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we don't want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

		/* the default WEP key needs to be configured at least once */
		if (key_type == KEY_WEP) {
			ret = wl12xx_cmd_set_default_wep_key(wl,
							wlvif->default_key,
							wlvif->sta.hlid);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;

		key_conf->hw_key_idx = key_conf->keyidx;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;

		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		ret = -EOPNOTSUPP;
		goto out_sleep;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			goto out_sleep;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out_sleep;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			goto out_sleep;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		ret = -EOPNOTSUPP;
		break;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl1271_scan_stop(wl);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_vif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_sched_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_scan_sched_scan_start(wl, wlvif);
	if (ret < 0)
		goto out_sleep;

	wl->sched_scanning = true;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_scan_sched_scan_stop(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
			   int offset)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ssid_len;
	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
					 skb->len - offset);

	if (!ptr) {
		wl1271_error("No SSID in IEs!");
		return -ENOENT;
	}

	ssid_len = ptr[1];
	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
		wl1271_error("SSID is too long!");
		return -EINVAL;
	}

	wlvif->ssid_len = ssid_len;
	memcpy(wlvif->ssid, ptr+2, ssid_len);
	return 0;
}
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
	int len;
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
					skb->len - ieoffset);
	if (!ie)
		return;
	len = ie[1] + 2;
	next = ie + len;
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
}
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
				    unsigned int oui, u8 oui_type,
				    int ieoffset)
{
	int len;
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					       skb->data + ieoffset,
					       skb->len - ieoffset);
	if (!ie)
		return;
	len = ie[1] + 2;
	next = ie + len;
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
}
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data,
				      skb->len, 0,
				      rates);

	dev_kfree_skb(skb);
	return ret;
}
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	ptr += (ptr[1] + 2);

	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if ((changed & BSS_CHANGED_BEACON_INT)) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
			wl1271_debug(DEBUG_AP, "probe response updated");
			set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
		}
	}

	if ((changed & BSS_CHANGED_BEACON)) {
		struct ieee80211_hdr *hdr;
		u32 min_rate;
		u16 tmpl_id;
		int ieoffset = offsetof(struct ieee80211_mgmt,
					u.beacon.variable);
		struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);

		if (!beacon) {
			ret = -EINVAL;
			goto out;
		}

		wl1271_debug(DEBUG_MASTER, "beacon updated");

		ret = wl1271_ssid_set(vif, beacon, ieoffset);
		if (ret < 0) {
			dev_kfree_skb(beacon);
			goto out;
		}
		min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
				  CMD_TEMPL_BEACON;
		ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
		if (ret < 0) {
			dev_kfree_skb(beacon);
			goto out;
		}

		/*
		 * In case we already have a probe-resp beacon set explicitly
		 * by usermode, don't use the beacon data.
		 */
		if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
			goto end_bcn;

		/* remove TIM ie from probe response */
		wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

		/*
		 * remove p2p ie from probe response.
		 * the fw responds to probe requests that don't include
		 * the p2p ie. probe requests with p2p ie will be passed,
		 * and will be responded by the supplicant (the spec
		 * forbids including the p2p ie when responding to probe
		 * requests that didn't include it).
		 */
		wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
					WLAN_OUI_TYPE_WFA_P2P, ieoffset);

		hdr = (struct ieee80211_hdr *) beacon->data;
		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						 IEEE80211_STYPE_PROBE_RESP);
		if (is_ap)
			ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
						beacon->data,
						beacon->len,
						min_rate);
		else
			ret = wl1271_cmd_template_set(wl, wlvif->role_id,
						      CMD_TEMPL_PROBE_RESPONSE,
						      beacon->data,
						      beacon->len, 0,
						      min_rate);
end_bcn:
		dev_kfree_skb(beacon);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
/* AP mode changes */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if ((changed & BSS_CHANGED_BASIC_RATES)) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								    wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
						    bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
/* STA/IBSS mode changes */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false, set_assoc = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
					       &wlvif->flags))
				wl1271_unjoin(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss) {
		ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
		if (ret < 0)
			wl1271_warning("idle mode change failed %d", ret);
	}

	if ((changed & BSS_CHANGED_CQM)) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & BSS_CHANGED_BSSID)
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wl12xx_cmd_build_null_data(wl, wlvif);
			if (ret < 0)
				goto out;

			ret = wl1271_build_qos_null_data(wl, vif);
			if (ret < 0)
				goto out;
		}

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta)
			goto sta_not_found;

		/* save the supp_rates of the ap */
		sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
		if (sta->ht_cap.ht_supported)
			sta_rate_set |=
			    (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
			    (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
		sta_ht_cap = sta->ht_cap;
		sta_exists = true;

sta_not_found:
		rcu_read_unlock();
	}

	if ((changed & BSS_CHANGED_ASSOC)) {
		if (bss_conf->assoc) {
			u32 rates;
			int ieoffset;
			wlvif->aid = bss_conf->aid;
			wlvif->channel_type = bss_conf->channel_type;
			wlvif->beacon_int = bss_conf->beacon_int;
			do_join = true;
			set_assoc = true;

			/* Cancel connection_loss_work */
			cancel_delayed_work_sync(&wl->connection_loss_work);

			/*
			 * use basic rates from AP, and determine lowest rate
			 * to use with control frames.
			 */
			rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);
			if (sta_rate_set)
				wlvif->rate_set =
					wl1271_tx_enabled_rates_get(wl,
								sta_rate_set,
								wlvif->band);
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;

			/*
			 * with wl1271, we don't need to update the
			 * beacon_int and dtim_period, because the firmware
			 * updates it by itself when the first beacon is
			 * received after a join.
			 */
			ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
			if (ret < 0)
				goto out;

			/*
			 * Get a template for hardware connection maintenance
			 */
			dev_kfree_skb(wlvif->probereq);
			wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
									wlvif,
									NULL);
			ieoffset = offsetof(struct ieee80211_mgmt,
					    u.probe_req.variable);
			wl1271_ssid_set(vif, wlvif->probereq, ieoffset);

			/* enable the connection monitoring feature */
			ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
			if (ret < 0)
				goto out;
		} else {
			/* use defaults when not associated */
			bool was_assoc =
			    !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
						 &wlvif->flags);
			bool was_ifup =
			    !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
						 &wlvif->flags);
			wlvif->aid = 0;

			/* free probe-request template */
			dev_kfree_skb(wlvif->probereq);
			wlvif->probereq = NULL;

			/* revert back to minimum rates for the current band */
			wl1271_set_band_rate(wl, wlvif);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;

			/* disable connection monitor features */
			ret = wl1271_acx_conn_monit_params(wl, wlvif, false);

			/* Disable the keep-alive feature */
			ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
			if (ret < 0)
				goto out;

			/* restore the bssid filter and go to dummy bssid */
			if (was_assoc) {
				/*
				 * we might have to disable roc, if there was
				 * no IF_OPER_UP notification.
				 */
				if (!was_ifup) {
					ret = wl12xx_croc(wl, wlvif->role_id);
					if (ret < 0)
						goto out;
				}
				/*
				 * (we also need to disable roc in case of
				 * roaming on the same channel. until we will
				 * have a better flow...)
				 */
				if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
					ret = wl12xx_croc(wl,
							  wlvif->dev_role_id);
					if (ret < 0)
						goto out;
				}

				wl1271_unjoin(wl, wlvif);
				if (!bss_conf->idle)
					wl12xx_start_dev(wl, wlvif);
			}
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wl1271_join(wl, wlvif, set_assoc);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}

		/* ROC until connected (after EAPOL exchange) */
		if (!is_ibss) {
			ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
			if (ret < 0)
				goto out;

			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		}
		/*
		 * stop device role if started (we might already be in
		 * STA/IBSS role).
		 */
		if (wl12xx_dev_role_started(wlvif)) {
			ret = wl12xx_stop_dev(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		if ((changed & BSS_CHANGED_HT) &&
		    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
			ret = wl1271_acx_set_ht_capabilities(wl,
							     &sta_ht_cap,
							     true,
							     wlvif->sta.hlid);
			if (ret < 0) {
				wl1271_warning("Set ht cap true failed %d",
					       ret);
				goto out;
			}
		}
		/* handle new association without HT and disassociation */
		else if (changed & BSS_CHANGED_ASSOC) {
			ret = wl1271_acx_set_ht_capabilities(wl,
							     &sta_ht_cap,
							     false,
							     wlvif->sta.hlid);
			if (ret < 0) {
				wl1271_warning("Set ht cap false failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle HT information change. Done after join. */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
						    bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 &&
		    bss_conf->arp_filter_enabled) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
		     (int)changed);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	if (idx != 0)
		return -ENOENT;

	survey->channel = conf->channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = wl->noise;

	return 0;
}
static int wl1271_allocate_sta(struct wl1271 *wl,
			       struct wl12xx_vif *wlvif,
			       struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;

	if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too much stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	memset(wl->links[hlid].addr, 0, ETH_ALEN);
	wl->links[hlid].ba_bitmap = 0;
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
static int wl12xx_sta_add(struct wl1271 *wl,
			  struct wl12xx_vif *wlvif,
			  struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret = 0;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);

	ret = wl1271_allocate_sta(wl, wlvif, sta);
	if (ret < 0)
		return ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	hlid = wl_sta->hlid;

	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
	if (ret < 0)
		wl1271_free_sta(wl, wlvif, hlid);

	return ret;
}
static int wl12xx_sta_remove(struct wl1271 *wl,
			     struct wl12xx_vif *wlvif,
			     struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret = 0, id;

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	id = wl_sta->hlid;
	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
		return -EINVAL;

	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
	if (ret < 0)
		return ret;

	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
	return ret;
}
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	u8 hlid;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	hlid = wl_sta->hlid;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return wl12xx_sta_add(wl, wlvif, sta);

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		wl12xx_sta_remove(wl, wlvif, sta);
		return 0;
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     hlid);
		return ret;
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		return wl12xx_set_authorized(wl, wlvif);
	}

	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		return 0;
	}

	return 0;
}
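
/*
 * Summary of the transitions handled above (derived from the code, added as a
 * reading aid): NOTEXIST->NONE adds an AP-mode peer, NONE->NOTEXIST removes
 * it, a transition into AUTHORIZED opens the data path (set_peer_state plus
 * HT capabilities in AP mode, WLVIF_FLAG_STA_AUTHORIZED plus
 * wl12xx_set_authorized() in STA mode), and AUTHORIZED->ASSOC clears the
 * STA-mode authorized flag again.
 */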
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	if (new_state < old_state)
		return 0;
	return ret;
}
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out;
	}

	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
		ba_bitmap = &wlvif->sta.ba_rx_bitmap;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
		ba_bitmap = &wl->links[hlid].ba_bitmap;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			ret = -EINVAL;
			wl1271_error("no active RX BA session on tid: %d",
				     tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The BA initiator session management in the FW is done
	 * independently. Falling through here on purpose for all TX AMPDU
	 * commands.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		     mask->control[NL80211_BAND_2GHZ].legacy,
		     mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);

		if (!ret)
			set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	bool ret = false;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* packets are considered pending if in the TX queue or the FW */
	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = 25 },
	{ .hw_value = 2, .center_freq = 2417, .max_power = 25 },
	{ .hw_value = 3, .center_freq = 2422, .max_power = 25 },
	{ .hw_value = 4, .center_freq = 2427, .max_power = 25 },
	{ .hw_value = 5, .center_freq = 2432, .max_power = 25 },
	{ .hw_value = 6, .center_freq = 2437, .max_power = 25 },
	{ .hw_value = 7, .center_freq = 2442, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 2447, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 2452, .max_power = 25 },
	{ .hw_value = 10, .center_freq = 2457, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
	{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
	{ .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};

/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};

/* 5 GHz data rates for WL1273 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 7, .center_freq = 5035, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 5040, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 5045, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 5055, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 5060, .max_power = 25 },
	{ .hw_value = 16, .center_freq = 5080, .max_power = 25 },
	{ .hw_value = 34, .center_freq = 5170, .max_power = 25 },
	{ .hw_value = 36, .center_freq = 5180, .max_power = 25 },
	{ .hw_value = 38, .center_freq = 5190, .max_power = 25 },
	{ .hw_value = 40, .center_freq = 5200, .max_power = 25 },
	{ .hw_value = 42, .center_freq = 5210, .max_power = 25 },
	{ .hw_value = 44, .center_freq = 5220, .max_power = 25 },
	{ .hw_value = 46, .center_freq = 5230, .max_power = 25 },
	{ .hw_value = 48, .center_freq = 5240, .max_power = 25 },
	{ .hw_value = 52, .center_freq = 5260, .max_power = 25 },
	{ .hw_value = 56, .center_freq = 5280, .max_power = 25 },
	{ .hw_value = 60, .center_freq = 5300, .max_power = 25 },
	{ .hw_value = 64, .center_freq = 5320, .max_power = 25 },
	{ .hw_value = 100, .center_freq = 5500, .max_power = 25 },
	{ .hw_value = 104, .center_freq = 5520, .max_power = 25 },
	{ .hw_value = 108, .center_freq = 5540, .max_power = 25 },
	{ .hw_value = 112, .center_freq = 5560, .max_power = 25 },
	{ .hw_value = 116, .center_freq = 5580, .max_power = 25 },
	{ .hw_value = 120, .center_freq = 5600, .max_power = 25 },
	{ .hw_value = 124, .center_freq = 5620, .max_power = 25 },
	{ .hw_value = 128, .center_freq = 5640, .max_power = 25 },
	{ .hw_value = 132, .center_freq = 5660, .max_power = 25 },
	{ .hw_value = 136, .center_freq = 5680, .max_power = 25 },
	{ .hw_value = 140, .center_freq = 5700, .max_power = 25 },
	{ .hw_value = 149, .center_freq = 5745, .max_power = 25 },
	{ .hw_value = 153, .center_freq = 5765, .max_power = 25 },
	{ .hw_value = 157, .center_freq = 5785, .max_power = 25 },
	{ .hw_value = 161, .center_freq = 5805, .max_power = 25 },
	{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
};

static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wl1271_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wl1271_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.channel_switch = wl12xx_op_channel_switch,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
{
	u8 idx;

	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
		return 0;
	}

	idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
		return 0;
	}

	return idx;
}
static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	ssize_t len;

	len = PAGE_SIZE;

	mutex_lock(&wl->mutex);
	len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
		       wl->sg_enabled);
	mutex_unlock(&wl->mutex);

	return len;
}
static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	unsigned long res;
	int ret;

	ret = kstrtoul(buf, 10, &res);
	if (ret < 0) {
		wl1271_warning("incorrect value written to bt_coex_mode");
		return count;
	}

	mutex_lock(&wl->mutex);

	res = !!res;

	if (res == wl->sg_enabled)
		goto out;

	wl->sg_enabled = res;

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_acx_sg_enable(wl, wl->sg_enabled);
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return count;
}

static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
		   wl1271_sysfs_show_bt_coex_state,
		   wl1271_sysfs_store_bt_coex_state);
static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	ssize_t len;

	len = PAGE_SIZE;

	mutex_lock(&wl->mutex);
	if (wl->hw_pg_ver >= 0)
		len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
	else
		len = snprintf(buf, len, "n/a\n");
	mutex_unlock(&wl->mutex);

	return len;
}

static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
		   wl1271_sysfs_show_hw_pg_ver, NULL);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr,
				       char *buffer, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct wl1271 *wl = dev_get_drvdata(dev);
	ssize_t len;
	int ret;

	ret = mutex_lock_interruptible(&wl->mutex);
	if (ret < 0)
		return -ERESTARTSYS;

	/* Let only one thread read the log at a time, blocking others */
	while (wl->fwlog_size == 0) {
		DEFINE_WAIT(wait);

		prepare_to_wait_exclusive(&wl->fwlog_waitq,
					  &wait,
					  TASK_INTERRUPTIBLE);

		if (wl->fwlog_size != 0) {
			finish_wait(&wl->fwlog_waitq, &wait);
			break;
		}

		mutex_unlock(&wl->mutex);

		schedule();
		finish_wait(&wl->fwlog_waitq, &wait);

		if (signal_pending(current))
			return -ERESTARTSYS;

		ret = mutex_lock_interruptible(&wl->mutex);
		if (ret < 0)
			return -ERESTARTSYS;
	}

	/* Check if the fwlog is still valid */
	if (wl->fwlog_size < 0) {
		mutex_unlock(&wl->mutex);
		return 0;
	}

	/* Seeking is not supported - old logs are not kept. Disregard pos. */
	len = min(count, (size_t)wl->fwlog_size);
	wl->fwlog_size -= len;
	memcpy(buffer, wl->fwlog, len);

	/* Make room for new messages */
	memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);

	mutex_unlock(&wl->mutex);

	return len;
}

static struct bin_attribute fwlog_attr = {
	.attr = {.name = "fwlog", .mode = S_IRUSR},
	.read = wl1271_sysfs_read_fwlog,
};
static void wl1271_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, connection_loss_work);

	wl1271_info("Connection loss work.");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* Call mac80211 connection loss */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
			goto out;
		vif = wl12xx_wlvif_to_vif(wlvif);
		ieee80211_connection_loss(vif);
	}
out:
	mutex_unlock(&wl->mutex);
}
static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
					u32 oui, u32 nic, int n)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
		     oui, nic, n);

	if (nic + n - 1 > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < n; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	wl->hw->wiphy->n_addresses = n;
	wl->hw->wiphy->addresses = wl->addresses;
}
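/*
 * Worked example (hypothetical values): with oui = 0x080028,
 * nic = 0x000001 and n = 2, the loop above derives two consecutive
 * addresses, 08:00:28:00:00:01 and 08:00:28:00:00:02 -- the OUI fills
 * bytes 0-2, the NIC part fills bytes 3-5 and is incremented once per
 * derived interface address.
 */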
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);

	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	wl->hw_pg_ver = wl->ops->get_pg_ver(wl);

	if (wl->ops->get_mac)
		wl->ops->get_mac(wl);

	wl1271_power_off(wl);
out:
	return ret;
}
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		goto out;
	}

	ret = wl1271_fetch_nvs(wl);
	if (ret == 0) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
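/*
 * Worked example (hypothetical NVS contents): with nvs_ptr[3..5] =
 * 01 00 00 and nvs_ptr[6], nvs_ptr[10], nvs_ptr[11] = 28 00 08, the
 * extraction above yields oui_addr = 0x080028 and nic_addr = 0x000001,
 * which wl12xx_derive_mac_addresses() turns into 08:00:28:00:00:01 and
 * 08:00:28:00:00:02 for the two supported interface addresses.
 */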
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->state == WL1271_STATE_PLT)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;
}
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer and the TKIP space. */
	wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
		sizeof(struct wl1271_tx_hw_descr);

	/* FIXME: find a proper value */
	wl->hw->channel_change_time = 10000;
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_SUPPORTS_PS |
			IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
			IEEE80211_HW_SUPPORTS_UAPSD |
			IEEE80211_HW_HAS_RATE_CONTROL |
			IEEE80211_HW_CONNECTION_MONITOR |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
			IEEE80211_HW_SPECTRUM_MGMT |
			IEEE80211_HW_AP_LINK_PS |
			IEEE80211_HW_AMPDU_AGGREGATION |
			IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
			IEEE80211_HW_SCAN_WHILE_IDLE;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
	       sizeof(wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
	       sizeof(wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	wl->hw->queues = 4;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
#define WL1271_DEFAULT_CHANNEL 0

struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WL12XX_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
	INIT_DELAYED_WORK(&wl->connection_loss_work,
			  wl1271_connection_loss_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = WL1271_DEFAULT_CHANNEL;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->sg_enabled = true;
	wl->ap_fw_ps_map = 0;
	wl->platform_quirks = 0;
	wl->sched_scanning = false;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WL1271_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);

	order = get_order(WL1271_AGGR_BUFFER_SIZE);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	return hw;

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	device_remove_bin_file(wl->dev, &fwlog_attr);

	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);

	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf,
		   get_order(WL1271_AGGR_BUFFER_SIZE));

	wl1271_debugfs_exit(wl);

	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
{
	struct wl1271 *wl = cookie;
	unsigned long flags;

	wl1271_debug(DEBUG_IRQ, "IRQ");

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return IRQ_WAKE_THREAD;
}
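/*
 * Note: wl12xx_hardirq() is the top half of the threaded interrupt
 * registered in wlcore_probe() below.  It only completes a pending ELP
 * wakeup and defers everything else: while suspended it marks the work
 * as pending and returns without waking the thread; otherwise it
 * returns IRQ_WAKE_THREAD so wl1271_irq() runs in process context.
 */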
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
	unsigned long irqflags;
	int ret;

	if (!wl->ops || !wl->ptable) {
		ret = -EINVAL;
		goto out_free_hw;
	}

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	wl->irq = platform_get_irq(pdev, 0);
	wl->ref_clock = pdata->board_ref_clock;
	wl->tcxo_clock = pdata->board_tcxo_clock;
	wl->platform_quirks = pdata->platform_quirks;
	wl->set_power = pdata->set_power;
	wl->dev = &pdev->dev;
	wl->if_ops = pdata->ops;

	platform_set_drvdata(pdev, wl);

	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		irqflags = IRQF_TRIGGER_RISING;
	else
		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;

	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
				   irqflags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("request_irq() failed: %d", ret);
		goto out_free_hw;
	}

	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		device_init_wakeup(wl->dev, 1);
		if (pdata->pwr_in_suspend) {
			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
			wl->hw->wiphy->wowlan.n_patterns =
				WL1271_MAX_RX_FILTERS;
			wl->hw->wiphy->wowlan.pattern_min_len = 1;
			wl->hw->wiphy->wowlan.pattern_max_len =
				WL1271_RX_FILTER_MAX_PATTERN_SIZE;
		}
	}
	disable_irq(wl->irq);

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	/* Create sysfs file to control bt coex state */
	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
	if (ret < 0) {
		wl1271_error("failed to create sysfs file bt_coex_state");
		goto out_irq;
	}

	/* Create sysfs file to get HW PG version */
	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
	if (ret < 0) {
		wl1271_error("failed to create sysfs file hw_pg_ver");
		goto out_bt_coex_state;
	}

	/* Create sysfs file for the FW log */
	ret = device_create_bin_file(wl->dev, &fwlog_attr);
	if (ret < 0) {
		wl1271_error("failed to create sysfs file fwlog");
		goto out_hw_pg_ver;
	}

	goto out;

out_hw_pg_ver:
	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);

out_bt_coex_state:
	device_remove_file(wl->dev, &dev_attr_bt_coex_state);

out_irq:
	free_irq(wl->irq, wl);

out_free_hw:
	wlcore_free_hw(wl);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
int __devexit wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
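/*
 * Illustrative only: the parameters above can be set at module load
 * time, for example (module name and values are assumptions; adjust for
 * how the driver is packaged on a given system):
 *
 *	modprobe wlcore debug_level=0 fwlog=ondemand bug_on_recovery=0
 */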