[PATCH] ipw2200: add module_param support for antenna selection
[linux-2.6/x86.git] / drivers/net/wireless/ipw2200.c
blob 297dd76177cce53aaea1baa4cfcb0538bf83e50c
1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
36 #define IPW2200_VERSION "git-1.1.1"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int bt_coexist = 0;
59 static int hwcrypto = 0;
60 static int roaming = 1;
61 static const char ipw_modes[] = {
62 'a', 'b', 'g', '?'
64 static int antenna = CFG_SYS_ANTENNA_BOTH;
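/*
 * This is the option the patch subject refers to.  A minimal sketch of how
 * it is exposed as a module parameter (illustrative only; the actual
 * module_param()/MODULE_PARM_DESC() registration sits with the driver's
 * other parameter declarations, outside this excerpt):
 */
#if 0	/* sketch, not part of this hunk */
module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (default 0 = both)");
#endif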
66 #ifdef CONFIG_IPW_QOS
67 static int qos_enable = 0;
68 static int qos_burst_enable = 0;
69 static int qos_no_ack_mask = 0;
70 static int burst_duration_CCK = 0;
71 static int burst_duration_OFDM = 0;
73 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
74 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
75 QOS_TX3_CW_MIN_OFDM},
76 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
77 QOS_TX3_CW_MAX_OFDM},
78 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
79 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
80 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
81 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
84 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
85 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
86 QOS_TX3_CW_MIN_CCK},
87 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
88 QOS_TX3_CW_MAX_CCK},
89 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
90 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
91 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
92 QOS_TX3_TXOP_LIMIT_CCK}
95 static struct ieee80211_qos_parameters def_parameters_OFDM = {
96 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
97 DEF_TX3_CW_MIN_OFDM},
98 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
99 DEF_TX3_CW_MAX_OFDM},
100 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
101 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
102 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
103 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
106 static struct ieee80211_qos_parameters def_parameters_CCK = {
107 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
108 DEF_TX3_CW_MIN_CCK},
109 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
110 DEF_TX3_CW_MAX_CCK},
111 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
112 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
113 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
114 DEF_TX3_TXOP_LIMIT_CCK}
117 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
119 static int from_priority_to_tx_queue[] = {
120 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
121 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
124 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
126 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
127 *qos_param);
128 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
129 *qos_param);
130 #endif /* CONFIG_IPW_QOS */
132 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
133 static void ipw_remove_current_network(struct ipw_priv *priv);
134 static void ipw_rx(struct ipw_priv *priv);
135 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
136 struct clx2_tx_queue *txq, int qindex);
137 static int ipw_queue_reset(struct ipw_priv *priv);
139 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
140 int len, int sync);
142 static void ipw_tx_queue_free(struct ipw_priv *);
144 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
145 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
146 static void ipw_rx_queue_replenish(void *);
147 static int ipw_up(struct ipw_priv *);
148 static void ipw_bg_up(void *);
149 static void ipw_down(struct ipw_priv *);
150 static void ipw_bg_down(void *);
151 static int ipw_config(struct ipw_priv *);
152 static int init_supported_rates(struct ipw_priv *priv,
153 struct ipw_supported_rates *prates);
154 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
155 static void ipw_send_wep_keys(struct ipw_priv *, int);
157 static int snprint_line(char *buf, size_t count,
158 const u8 * data, u32 len, u32 ofs)
160 int out, i, j, l;
161 char c;
163 out = snprintf(buf, count, "%08X", ofs);
165 for (l = 0, i = 0; i < 2; i++) {
166 out += snprintf(buf + out, count - out, " ");
167 for (j = 0; j < 8 && l < len; j++, l++)
168 out += snprintf(buf + out, count - out, "%02X ",
169 data[(i * 8 + j)]);
170 for (; j < 8; j++)
171 out += snprintf(buf + out, count - out, " ");
174 out += snprintf(buf + out, count - out, " ");
175 for (l = 0, i = 0; i < 2; i++) {
176 out += snprintf(buf + out, count - out, " ");
177 for (j = 0; j < 8 && l < len; j++, l++) {
178 c = data[(i * 8 + j)];
179 if (!isascii(c) || !isprint(c))
180 c = '.';
182 out += snprintf(buf + out, count - out, "%c", c);
185 for (; j < 8; j++)
186 out += snprintf(buf + out, count - out, " ");
189 return out;
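/*
 * Example of a line produced by snprint_line() (illustrative data), roughly:
 *
 *   00000010 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 *
 * i.e. the offset, up to 16 hex bytes in two groups of eight, then the same
 * bytes as ASCII with non-printable characters replaced by '.'.
 */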
192 static void printk_buf(int level, const u8 * data, u32 len)
194 char line[81];
195 u32 ofs = 0;
196 if (!(ipw_debug_level & level))
197 return;
199 while (len) {
200 snprint_line(line, sizeof(line), &data[ofs],
201 min(len, 16U), ofs);
202 printk(KERN_DEBUG "%s\n", line);
203 ofs += 16;
204 len -= min(len, 16U);
208 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
210 size_t out = size;
211 u32 ofs = 0;
212 int total = 0;
214 while (size && len) {
215 out = snprint_line(output, size, &data[ofs],
216 min_t(size_t, len, 16U), ofs);
218 ofs += 16;
219 output += out;
220 size -= out;
221 len -= min_t(size_t, len, 16U);
222 total += out;
224 return total;
227 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
228 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
229 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
231 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
232 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
233 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
235 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
236 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
237 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
239 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
240 __LINE__, (u32) (b), (u32) (c));
241 _ipw_write_reg8(a, b, c);
244 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
245 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
246 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
248 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
249 __LINE__, (u32) (b), (u32) (c));
250 _ipw_write_reg16(a, b, c);
253 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
254 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
255 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
257 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
258 __LINE__, (u32) (b), (u32) (c));
259 _ipw_write_reg32(a, b, c);
262 /* 8-bit direct write (low 4K) */
263 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
265 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
266 #define ipw_write8(ipw, ofs, val) \
267 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
268 _ipw_write8(ipw, ofs, val)
270 /* 16-bit direct write (low 4K) */
271 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
273 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
274 #define ipw_write16(ipw, ofs, val) \
275 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
276 _ipw_write16(ipw, ofs, val)
278 /* 32-bit direct write (low 4K) */
279 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
281 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
282 #define ipw_write32(ipw, ofs, val) \
283 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
284 _ipw_write32(ipw, ofs, val)
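/*
 * Note that ipw_write8/16/32 above expand to two statements, so a guard
 * like "if (cond) ipw_write32(...);" would only cover the IPW_DEBUG_IO()
 * call.  A brace-safe variant would look like the sketch below (hypothetical
 * name, not part of this file):
 */
#if 0	/* sketch only */
#define ipw_write32_checked(ipw, ofs, val) do {				\
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n",	\
		     __FILE__, __LINE__, (u32)(ofs), (u32)(val));	\
	_ipw_write32(ipw, ofs, val);					\
} while (0)
#endif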
286 /* 8-bit direct read (low 4K) */
287 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
289 /* 8-bit direct read (low 4K), with debug wrapper */
290 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
292 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
293 return _ipw_read8(ipw, ofs);
296 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
297 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
299 /* 16-bit direct read (low 4K) */
300 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
302 /* 16-bit direct read (low 4K), with debug wrapper */
303 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
305 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
306 return _ipw_read16(ipw, ofs);
309 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
310 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
312 /* 32-bit direct read (low 4K) */
313 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
315 /* 32-bit direct read (low 4K), with debug wrapper */
316 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
318 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
319 return _ipw_read32(ipw, ofs);
322 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
323 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
325 /* multi-byte read (above 4K), with debug wrapper */
326 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
327 static inline void __ipw_read_indirect(const char *f, int l,
328 struct ipw_priv *a, u32 b, u8 * c, int d)
330 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
332 _ipw_read_indirect(a, b, c, d);
335 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
336 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
338 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
339 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
340 int num);
341 #define ipw_write_indirect(a, b, c, d) \
342 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
343 _ipw_write_indirect(a, b, c, d)
345 /* 32-bit indirect write (above 4K) */
346 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
348 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
349 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
350 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
353 /* 8-bit indirect write (above 4K) */
354 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
356 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
357 u32 dif_len = reg - aligned_addr;
359 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
360 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
361 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
364 /* 16-bit indirect write (above 4K) */
365 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
367 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
368 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
370 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
371 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
372 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
375 /* 8-bit indirect read (above 4K) */
376 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
378 u32 word;
379 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
380 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
381 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
382 return (word >> ((reg & 0x3) * 8)) & 0xff;
385 /* 32-bit indirect read (above 4K) */
386 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
388 u32 value;
390 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
392 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
393 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
394 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
395 return value;
398 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
399 /* for area above 1st 4K of SRAM/reg space */
400 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
401 int num)
403 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
404 u32 dif_len = addr - aligned_addr;
405 u32 i;
407 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
409 if (num <= 0) {
410 return;
413 /* Read the first dword (or portion) byte by byte */
414 if (unlikely(dif_len)) {
415 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
416 /* Start reading at aligned_addr + dif_len */
417 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
418 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
419 aligned_addr += 4;
422 /* Read all of the middle dwords as dwords, with auto-increment */
423 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
424 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
425 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
427 /* Read the last dword (or portion) byte by byte */
428 if (unlikely(num)) {
429 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
430 for (i = 0; num > 0; i++, num--)
431 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
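/*
 * Worked example for the head/middle/tail split above (illustrative
 * numbers): a read of num = 10 bytes at addr = 0x00300006 yields
 * aligned_addr = 0x00300004 and dif_len = 2, so two head bytes are read
 * from IPW_INDIRECT_DATA + 2 and + 3, the remaining eight bytes come from
 * two auto-increment dword reads, and no tail bytes are left over.
 */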
435 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
436 /* for area above 1st 4K of SRAM/reg space */
437 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
438 int num)
440 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
441 u32 dif_len = addr - aligned_addr;
442 u32 i;
444 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
446 if (num <= 0) {
447 return;
450 /* Write the first dword (or portion) byte by byte */
451 if (unlikely(dif_len)) {
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 /* Start writing at aligned_addr + dif_len */
454 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
455 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
456 aligned_addr += 4;
459 /* Write all of the middle dwords as dwords, with auto-increment */
460 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
461 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
462 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
464 /* Write the last dword (or portion) byte by byte */
465 if (unlikely(num)) {
466 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
467 for (i = 0; num > 0; i++, num--, buf++)
468 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
472 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
473 /* for 1st 4K of SRAM/regs space */
474 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
475 int num)
477 memcpy_toio((priv->hw_base + addr), buf, num);
480 /* Set bit(s) in low 4K of SRAM/regs */
481 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
483 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
486 /* Clear bit(s) in low 4K of SRAM/regs */
487 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
489 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
492 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
494 if (priv->status & STATUS_INT_ENABLED)
495 return;
496 priv->status |= STATUS_INT_ENABLED;
497 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
500 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
502 if (!(priv->status & STATUS_INT_ENABLED))
503 return;
504 priv->status &= ~STATUS_INT_ENABLED;
505 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
508 #ifdef CONFIG_IPW2200_DEBUG
509 static char *ipw_error_desc(u32 val)
511 switch (val) {
512 case IPW_FW_ERROR_OK:
513 return "ERROR_OK";
514 case IPW_FW_ERROR_FAIL:
515 return "ERROR_FAIL";
516 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
517 return "MEMORY_UNDERFLOW";
518 case IPW_FW_ERROR_MEMORY_OVERFLOW:
519 return "MEMORY_OVERFLOW";
520 case IPW_FW_ERROR_BAD_PARAM:
521 return "BAD_PARAM";
522 case IPW_FW_ERROR_BAD_CHECKSUM:
523 return "BAD_CHECKSUM";
524 case IPW_FW_ERROR_NMI_INTERRUPT:
525 return "NMI_INTERRUPT";
526 case IPW_FW_ERROR_BAD_DATABASE:
527 return "BAD_DATABASE";
528 case IPW_FW_ERROR_ALLOC_FAIL:
529 return "ALLOC_FAIL";
530 case IPW_FW_ERROR_DMA_UNDERRUN:
531 return "DMA_UNDERRUN";
532 case IPW_FW_ERROR_DMA_STATUS:
533 return "DMA_STATUS";
534 case IPW_FW_ERROR_DINO_ERROR:
535 return "DINO_ERROR";
536 case IPW_FW_ERROR_EEPROM_ERROR:
537 return "EEPROM_ERROR";
538 case IPW_FW_ERROR_SYSASSERT:
539 return "SYSASSERT";
540 case IPW_FW_ERROR_FATAL_ERROR:
541 return "FATAL_ERROR";
542 default:
543 return "UNKNOWN_ERROR";
547 static void ipw_dump_error_log(struct ipw_priv *priv,
548 struct ipw_fw_error *error)
550 u32 i;
552 if (!error) {
553 IPW_ERROR("Error allocating and capturing error log. "
554 "Nothing to dump.\n");
555 return;
558 IPW_ERROR("Start IPW Error Log Dump:\n");
559 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
560 error->status, error->config);
562 for (i = 0; i < error->elem_len; i++)
563 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
564 ipw_error_desc(error->elem[i].desc),
565 error->elem[i].time,
566 error->elem[i].blink1,
567 error->elem[i].blink2,
568 error->elem[i].link1,
569 error->elem[i].link2, error->elem[i].data);
570 for (i = 0; i < error->log_len; i++)
571 IPW_ERROR("%i\t0x%08x\t%i\n",
572 error->log[i].time,
573 error->log[i].data, error->log[i].event);
575 #endif
577 static inline int ipw_is_init(struct ipw_priv *priv)
579 return (priv->status & STATUS_INIT) ? 1 : 0;
582 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
584 u32 addr, field_info, field_len, field_count, total_len;
586 IPW_DEBUG_ORD("ordinal = %i\n", ord);
588 if (!priv || !val || !len) {
589 IPW_DEBUG_ORD("Invalid argument\n");
590 return -EINVAL;
593 /* verify device ordinal tables have been initialized */
594 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
595 IPW_DEBUG_ORD("Access ordinals before initialization\n");
596 return -EINVAL;
599 switch (IPW_ORD_TABLE_ID_MASK & ord) {
600 case IPW_ORD_TABLE_0_MASK:
602 * TABLE 0: Direct access to a table of 32 bit values
604 * This is a very simple table with the data directly
605 * read from the table
608 /* remove the table id from the ordinal */
609 ord &= IPW_ORD_TABLE_VALUE_MASK;
611 /* boundary check */
612 if (ord > priv->table0_len) {
613 IPW_DEBUG_ORD("ordinal value (%i) larger than "
614 "max (%i)\n", ord, priv->table0_len);
615 return -EINVAL;
618 /* verify we have enough room to store the value */
619 if (*len < sizeof(u32)) {
620 IPW_DEBUG_ORD("ordinal buffer length too small, "
621 "need %zd\n", sizeof(u32));
622 return -EINVAL;
625 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
626 ord, priv->table0_addr + (ord << 2));
628 *len = sizeof(u32);
629 ord <<= 2;
630 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
631 break;
633 case IPW_ORD_TABLE_1_MASK:
635 * TABLE 1: Indirect access to a table of 32 bit values
637 * This is a fairly large table of u32 values each
638 * representing starting addr for the data (which is
639 * also a u32)
642 /* remove the table id from the ordinal */
643 ord &= IPW_ORD_TABLE_VALUE_MASK;
645 /* boundary check */
646 if (ord > priv->table1_len) {
647 IPW_DEBUG_ORD("ordinal value too long\n");
648 return -EINVAL;
651 /* verify we have enough room to store the value */
652 if (*len < sizeof(u32)) {
653 IPW_DEBUG_ORD("ordinal buffer length too small, "
654 "need %zd\n", sizeof(u32));
655 return -EINVAL;
658 *((u32 *) val) =
659 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
660 *len = sizeof(u32);
661 break;
663 case IPW_ORD_TABLE_2_MASK:
665 * TABLE 2: Indirect access to a table of variable sized values
667 * This table consists of six values, each containing
668 * - dword containing the starting offset of the data
669 * - dword containing the length in the first 16 bits
670 * and the count in the second 16 bits
673 /* remove the table id from the ordinal */
674 ord &= IPW_ORD_TABLE_VALUE_MASK;
676 /* boundary check */
677 if (ord > priv->table2_len) {
678 IPW_DEBUG_ORD("ordinal value too long\n");
679 return -EINVAL;
682 /* get the address of statistic */
683 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
685 /* get the second DW of statistics ;
686 * two 16-bit words - first is length, second is count */
687 field_info =
688 ipw_read_reg32(priv,
689 priv->table2_addr + (ord << 3) +
690 sizeof(u32));
692 /* get each entry length */
693 field_len = *((u16 *) & field_info);
695 /* get number of entries */
696 field_count = *(((u16 *) & field_info) + 1);
698 /* abort if not enough memory */
699 total_len = field_len * field_count;
700 if (total_len > *len) {
701 *len = total_len;
702 return -EINVAL;
705 *len = total_len;
706 if (!total_len)
707 return 0;
709 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
710 "field_info = 0x%08x\n",
711 addr, total_len, field_info);
712 ipw_read_indirect(priv, addr, val, total_len);
713 break;
715 default:
716 IPW_DEBUG_ORD("Invalid ordinal!\n");
717 return -EINVAL;
721 return 0;
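/*
 * Worked example for the TABLE 2 case above (illustrative value, assuming
 * a little-endian host): field_info = 0x00040008 unpacks to
 * field_len = 0x0008 (low 16 bits) and field_count = 0x0004 (high 16 bits),
 * so total_len = 8 * 4 = 32 bytes are copied from the entry's data address.
 */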
724 static void ipw_init_ordinals(struct ipw_priv *priv)
726 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
727 priv->table0_len = ipw_read32(priv, priv->table0_addr);
729 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
730 priv->table0_addr, priv->table0_len);
732 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
733 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
735 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
736 priv->table1_addr, priv->table1_len);
738 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
739 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
740 priv->table2_len &= 0x0000ffff; /* use first two bytes */
742 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
743 priv->table2_addr, priv->table2_len);
747 static u32 ipw_register_toggle(u32 reg)
749 reg &= ~IPW_START_STANDBY;
750 if (reg & IPW_GATE_ODMA)
751 reg &= ~IPW_GATE_ODMA;
752 if (reg & IPW_GATE_IDMA)
753 reg &= ~IPW_GATE_IDMA;
754 if (reg & IPW_GATE_ADMA)
755 reg &= ~IPW_GATE_ADMA;
756 return reg;
760 * LED behavior:
761 * - On radio ON, turn on any LEDs that require to be on during start
762 * - On initialization, start unassociated blink
763 * - On association, disable unassociated blink
764 * - On disassociation, start unassociated blink
765 * - On radio OFF, turn off any LEDs started during radio on
768 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
769 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
770 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
772 static void ipw_led_link_on(struct ipw_priv *priv)
774 unsigned long flags;
775 u32 led;
777 /* If configured to not use LEDs, or nic_type is 1,
778 * then we don't toggle a LINK led */
779 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
780 return;
782 spin_lock_irqsave(&priv->lock, flags);
784 if (!(priv->status & STATUS_RF_KILL_MASK) &&
785 !(priv->status & STATUS_LED_LINK_ON)) {
786 IPW_DEBUG_LED("Link LED On\n");
787 led = ipw_read_reg32(priv, IPW_EVENT_REG);
788 led |= priv->led_association_on;
790 led = ipw_register_toggle(led);
792 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
793 ipw_write_reg32(priv, IPW_EVENT_REG, led);
795 priv->status |= STATUS_LED_LINK_ON;
797 /* If we aren't associated, schedule turning the LED off */
798 if (!(priv->status & STATUS_ASSOCIATED))
799 queue_delayed_work(priv->workqueue,
800 &priv->led_link_off,
801 LD_TIME_LINK_ON);
804 spin_unlock_irqrestore(&priv->lock, flags);
807 static void ipw_bg_led_link_on(void *data)
809 struct ipw_priv *priv = data;
810 mutex_lock(&priv->mutex);
811 ipw_led_link_on(data);
812 mutex_unlock(&priv->mutex);
815 static void ipw_led_link_off(struct ipw_priv *priv)
817 unsigned long flags;
818 u32 led;
820 /* If configured not to use LEDs, or nic type is 1,
821 * then we don't toggle the LINK led. */
822 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
823 return;
825 spin_lock_irqsave(&priv->lock, flags);
827 if (priv->status & STATUS_LED_LINK_ON) {
828 led = ipw_read_reg32(priv, IPW_EVENT_REG);
829 led &= priv->led_association_off;
830 led = ipw_register_toggle(led);
832 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
833 ipw_write_reg32(priv, IPW_EVENT_REG, led);
835 IPW_DEBUG_LED("Link LED Off\n");
837 priv->status &= ~STATUS_LED_LINK_ON;
839 /* If we aren't associated and the radio is on, schedule
840 * turning the LED on (blink while unassociated) */
841 if (!(priv->status & STATUS_RF_KILL_MASK) &&
842 !(priv->status & STATUS_ASSOCIATED))
843 queue_delayed_work(priv->workqueue, &priv->led_link_on,
844 LD_TIME_LINK_OFF);
848 spin_unlock_irqrestore(&priv->lock, flags);
851 static void ipw_bg_led_link_off(void *data)
853 struct ipw_priv *priv = data;
854 mutex_lock(&priv->mutex);
855 ipw_led_link_off(data);
856 mutex_unlock(&priv->mutex);
859 static void __ipw_led_activity_on(struct ipw_priv *priv)
861 u32 led;
863 if (priv->config & CFG_NO_LED)
864 return;
866 if (priv->status & STATUS_RF_KILL_MASK)
867 return;
869 if (!(priv->status & STATUS_LED_ACT_ON)) {
870 led = ipw_read_reg32(priv, IPW_EVENT_REG);
871 led |= priv->led_activity_on;
873 led = ipw_register_toggle(led);
875 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
876 ipw_write_reg32(priv, IPW_EVENT_REG, led);
878 IPW_DEBUG_LED("Activity LED On\n");
880 priv->status |= STATUS_LED_ACT_ON;
882 cancel_delayed_work(&priv->led_act_off);
883 queue_delayed_work(priv->workqueue, &priv->led_act_off,
884 LD_TIME_ACT_ON);
885 } else {
886 /* Reschedule LED off for full time period */
887 cancel_delayed_work(&priv->led_act_off);
888 queue_delayed_work(priv->workqueue, &priv->led_act_off,
889 LD_TIME_ACT_ON);
893 #if 0
894 void ipw_led_activity_on(struct ipw_priv *priv)
896 unsigned long flags;
897 spin_lock_irqsave(&priv->lock, flags);
898 __ipw_led_activity_on(priv);
899 spin_unlock_irqrestore(&priv->lock, flags);
901 #endif /* 0 */
903 static void ipw_led_activity_off(struct ipw_priv *priv)
905 unsigned long flags;
906 u32 led;
908 if (priv->config & CFG_NO_LED)
909 return;
911 spin_lock_irqsave(&priv->lock, flags);
913 if (priv->status & STATUS_LED_ACT_ON) {
914 led = ipw_read_reg32(priv, IPW_EVENT_REG);
915 led &= priv->led_activity_off;
917 led = ipw_register_toggle(led);
919 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
920 ipw_write_reg32(priv, IPW_EVENT_REG, led);
922 IPW_DEBUG_LED("Activity LED Off\n");
924 priv->status &= ~STATUS_LED_ACT_ON;
927 spin_unlock_irqrestore(&priv->lock, flags);
930 static void ipw_bg_led_activity_off(void *data)
932 struct ipw_priv *priv = data;
933 mutex_lock(&priv->mutex);
934 ipw_led_activity_off(data);
935 mutex_unlock(&priv->mutex);
938 static void ipw_led_band_on(struct ipw_priv *priv)
940 unsigned long flags;
941 u32 led;
943 /* Only nic type 1 supports mode LEDs */
944 if (priv->config & CFG_NO_LED ||
945 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
946 return;
948 spin_lock_irqsave(&priv->lock, flags);
950 led = ipw_read_reg32(priv, IPW_EVENT_REG);
951 if (priv->assoc_network->mode == IEEE_A) {
952 led |= priv->led_ofdm_on;
953 led &= priv->led_association_off;
954 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
955 } else if (priv->assoc_network->mode == IEEE_G) {
956 led |= priv->led_ofdm_on;
957 led |= priv->led_association_on;
958 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
959 } else {
960 led &= priv->led_ofdm_off;
961 led |= priv->led_association_on;
962 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
965 led = ipw_register_toggle(led);
967 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
968 ipw_write_reg32(priv, IPW_EVENT_REG, led);
970 spin_unlock_irqrestore(&priv->lock, flags);
973 static void ipw_led_band_off(struct ipw_priv *priv)
975 unsigned long flags;
976 u32 led;
978 /* Only nic type 1 supports mode LEDs */
979 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
980 return;
982 spin_lock_irqsave(&priv->lock, flags);
984 led = ipw_read_reg32(priv, IPW_EVENT_REG);
985 led &= priv->led_ofdm_off;
986 led &= priv->led_association_off;
988 led = ipw_register_toggle(led);
990 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
991 ipw_write_reg32(priv, IPW_EVENT_REG, led);
993 spin_unlock_irqrestore(&priv->lock, flags);
996 static void ipw_led_radio_on(struct ipw_priv *priv)
998 ipw_led_link_on(priv);
1001 static void ipw_led_radio_off(struct ipw_priv *priv)
1003 ipw_led_activity_off(priv);
1004 ipw_led_link_off(priv);
1007 static void ipw_led_link_up(struct ipw_priv *priv)
1009 /* Set the Link Led on for all nic types */
1010 ipw_led_link_on(priv);
1013 static void ipw_led_link_down(struct ipw_priv *priv)
1015 ipw_led_activity_off(priv);
1016 ipw_led_link_off(priv);
1018 if (priv->status & STATUS_RF_KILL_MASK)
1019 ipw_led_radio_off(priv);
1022 static void ipw_led_init(struct ipw_priv *priv)
1024 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1026 /* Set the default PINs for the link and activity leds */
1027 priv->led_activity_on = IPW_ACTIVITY_LED;
1028 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1030 priv->led_association_on = IPW_ASSOCIATED_LED;
1031 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1033 /* Set the default PINs for the OFDM leds */
1034 priv->led_ofdm_on = IPW_OFDM_LED;
1035 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1037 switch (priv->nic_type) {
1038 case EEPROM_NIC_TYPE_1:
1039 /* In this NIC type, the LEDs are reversed.... */
1040 priv->led_activity_on = IPW_ASSOCIATED_LED;
1041 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1042 priv->led_association_on = IPW_ACTIVITY_LED;
1043 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1045 if (!(priv->config & CFG_NO_LED))
1046 ipw_led_band_on(priv);
1048 /* And we don't blink link LEDs for this nic, so
1049 * just return here */
1050 return;
1052 case EEPROM_NIC_TYPE_3:
1053 case EEPROM_NIC_TYPE_2:
1054 case EEPROM_NIC_TYPE_4:
1055 case EEPROM_NIC_TYPE_0:
1056 break;
1058 default:
1059 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1060 priv->nic_type);
1061 priv->nic_type = EEPROM_NIC_TYPE_0;
1062 break;
1065 if (!(priv->config & CFG_NO_LED)) {
1066 if (priv->status & STATUS_ASSOCIATED)
1067 ipw_led_link_on(priv);
1068 else
1069 ipw_led_link_off(priv);
1073 static void ipw_led_shutdown(struct ipw_priv *priv)
1075 ipw_led_activity_off(priv);
1076 ipw_led_link_off(priv);
1077 ipw_led_band_off(priv);
1078 cancel_delayed_work(&priv->led_link_on);
1079 cancel_delayed_work(&priv->led_link_off);
1080 cancel_delayed_work(&priv->led_act_off);
1084 * The following adds a new attribute to the sysfs representation
1085 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1086 * used for controlling the debug level.
1088 * See the level definitions in ipw for details.
1090 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1092 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1095 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1096 size_t count)
1098 char *p = (char *)buf;
1099 u32 val;
1101 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1102 p++;
1103 if (p[0] == 'x' || p[0] == 'X')
1104 p++;
1105 val = simple_strtoul(p, &p, 16);
1106 } else
1107 val = simple_strtoul(p, &p, 10);
1108 if (p == buf)
1109 printk(KERN_INFO DRV_NAME
1110 ": %s is not in hex or decimal form.\n", buf);
1111 else
1112 ipw_debug_level = val;
1114 return strnlen(buf, count);
1117 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1118 show_debug_level, store_debug_level);
1120 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1122 /* length = 1st dword in log */
1123 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1126 static void ipw_capture_event_log(struct ipw_priv *priv,
1127 u32 log_len, struct ipw_event *log)
1129 u32 base;
1131 if (log_len) {
1132 base = ipw_read32(priv, IPW_EVENT_LOG);
1133 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1134 (u8 *) log, sizeof(*log) * log_len);
1138 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1140 struct ipw_fw_error *error;
1141 u32 log_len = ipw_get_event_log_len(priv);
1142 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1143 u32 elem_len = ipw_read_reg32(priv, base);
1145 error = kmalloc(sizeof(*error) +
1146 sizeof(*error->elem) * elem_len +
1147 sizeof(*error->log) * log_len, GFP_ATOMIC);
1148 if (!error) {
1149 IPW_ERROR("Memory allocation for firmware error log "
1150 "failed.\n");
1151 return NULL;
1153 error->jiffies = jiffies;
1154 error->status = priv->status;
1155 error->config = priv->config;
1156 error->elem_len = elem_len;
1157 error->log_len = log_len;
1158 error->elem = (struct ipw_error_elem *)error->payload;
1159 error->log = (struct ipw_event *)(error->elem + elem_len);
1161 ipw_capture_event_log(priv, log_len, error->log);
1163 if (elem_len)
1164 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1165 sizeof(*error->elem) * elem_len);
1167 return error;
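/*
 * The single allocation above is laid out as:
 *
 *   struct ipw_fw_error | elem[0 .. elem_len-1] | log[0 .. log_len-1]
 *
 * with error->elem and error->log pointing into the trailing payload, so
 * one kfree() in ipw_free_error_log() releases the whole capture.
 */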
1170 static void ipw_free_error_log(struct ipw_fw_error *error)
1172 if (error)
1173 kfree(error);
1176 static ssize_t show_event_log(struct device *d,
1177 struct device_attribute *attr, char *buf)
1179 struct ipw_priv *priv = dev_get_drvdata(d);
1180 u32 log_len = ipw_get_event_log_len(priv);
1181 struct ipw_event log[log_len];
1182 u32 len = 0, i;
1184 ipw_capture_event_log(priv, log_len, log);
1186 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1187 for (i = 0; i < log_len; i++)
1188 len += snprintf(buf + len, PAGE_SIZE - len,
1189 "\n%08X%08X%08X",
1190 log[i].time, log[i].event, log[i].data);
1191 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1192 return len;
1195 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1197 static ssize_t show_error(struct device *d,
1198 struct device_attribute *attr, char *buf)
1200 struct ipw_priv *priv = dev_get_drvdata(d);
1201 u32 len = 0, i;
1202 if (!priv->error)
1203 return 0;
1204 len += snprintf(buf + len, PAGE_SIZE - len,
1205 "%08lX%08X%08X%08X",
1206 priv->error->jiffies,
1207 priv->error->status,
1208 priv->error->config, priv->error->elem_len);
1209 for (i = 0; i < priv->error->elem_len; i++)
1210 len += snprintf(buf + len, PAGE_SIZE - len,
1211 "\n%08X%08X%08X%08X%08X%08X%08X",
1212 priv->error->elem[i].time,
1213 priv->error->elem[i].desc,
1214 priv->error->elem[i].blink1,
1215 priv->error->elem[i].blink2,
1216 priv->error->elem[i].link1,
1217 priv->error->elem[i].link2,
1218 priv->error->elem[i].data);
1220 len += snprintf(buf + len, PAGE_SIZE - len,
1221 "\n%08X", priv->error->log_len);
1222 for (i = 0; i < priv->error->log_len; i++)
1223 len += snprintf(buf + len, PAGE_SIZE - len,
1224 "\n%08X%08X%08X",
1225 priv->error->log[i].time,
1226 priv->error->log[i].event,
1227 priv->error->log[i].data);
1228 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1229 return len;
1232 static ssize_t clear_error(struct device *d,
1233 struct device_attribute *attr,
1234 const char *buf, size_t count)
1236 struct ipw_priv *priv = dev_get_drvdata(d);
1237 if (priv->error) {
1238 ipw_free_error_log(priv->error);
1239 priv->error = NULL;
1241 return count;
1244 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1246 static ssize_t show_cmd_log(struct device *d,
1247 struct device_attribute *attr, char *buf)
1249 struct ipw_priv *priv = dev_get_drvdata(d);
1250 u32 len = 0, i;
1251 if (!priv->cmdlog)
1252 return 0;
1253 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1254 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1255 i = (i + 1) % priv->cmdlog_len) {
1256 len +=
1257 snprintf(buf + len, PAGE_SIZE - len,
1258 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1259 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1260 priv->cmdlog[i].cmd.len);
1261 len +=
1262 snprintk_buf(buf + len, PAGE_SIZE - len,
1263 (u8 *) priv->cmdlog[i].cmd.param,
1264 priv->cmdlog[i].cmd.len);
1265 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1267 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1268 return len;
1271 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1273 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1274 char *buf)
1276 struct ipw_priv *priv = dev_get_drvdata(d);
1277 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1280 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1281 const char *buf, size_t count)
1283 struct ipw_priv *priv = dev_get_drvdata(d);
1284 #ifdef CONFIG_IPW2200_DEBUG
1285 struct net_device *dev = priv->net_dev;
1286 #endif
1287 char buffer[] = "00000000";
1288 unsigned long len =
1289 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1290 unsigned long val;
1291 char *p = buffer;
1293 IPW_DEBUG_INFO("enter\n");
1295 strncpy(buffer, buf, len);
1296 buffer[len] = 0;
1298 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1299 p++;
1300 if (p[0] == 'x' || p[0] == 'X')
1301 p++;
1302 val = simple_strtoul(p, &p, 16);
1303 } else
1304 val = simple_strtoul(p, &p, 10);
1305 if (p == buffer) {
1306 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1307 } else {
1308 priv->ieee->scan_age = val;
1309 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1312 IPW_DEBUG_INFO("exit\n");
1313 return len;
1316 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1318 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1319 char *buf)
1321 struct ipw_priv *priv = dev_get_drvdata(d);
1322 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1325 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1326 const char *buf, size_t count)
1328 struct ipw_priv *priv = dev_get_drvdata(d);
1330 IPW_DEBUG_INFO("enter\n");
1332 if (count == 0)
1333 return 0;
1335 if (*buf == 0) {
1336 IPW_DEBUG_LED("Disabling LED control.\n");
1337 priv->config |= CFG_NO_LED;
1338 ipw_led_shutdown(priv);
1339 } else {
1340 IPW_DEBUG_LED("Enabling LED control.\n");
1341 priv->config &= ~CFG_NO_LED;
1342 ipw_led_init(priv);
1345 IPW_DEBUG_INFO("exit\n");
1346 return count;
1349 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1351 static ssize_t show_status(struct device *d,
1352 struct device_attribute *attr, char *buf)
1354 struct ipw_priv *p = d->driver_data;
1355 return sprintf(buf, "0x%08x\n", (int)p->status);
1358 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1360 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1361 char *buf)
1363 struct ipw_priv *p = d->driver_data;
1364 return sprintf(buf, "0x%08x\n", (int)p->config);
1367 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1369 static ssize_t show_nic_type(struct device *d,
1370 struct device_attribute *attr, char *buf)
1372 struct ipw_priv *priv = d->driver_data;
1373 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1376 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1378 static ssize_t show_ucode_version(struct device *d,
1379 struct device_attribute *attr, char *buf)
1381 u32 len = sizeof(u32), tmp = 0;
1382 struct ipw_priv *p = d->driver_data;
1384 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1385 return 0;
1387 return sprintf(buf, "0x%08x\n", tmp);
1390 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1392 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1393 char *buf)
1395 u32 len = sizeof(u32), tmp = 0;
1396 struct ipw_priv *p = d->driver_data;
1398 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1399 return 0;
1401 return sprintf(buf, "0x%08x\n", tmp);
1404 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1407 * Add a device attribute to view/control the delay between eeprom
1408 * operations.
1410 static ssize_t show_eeprom_delay(struct device *d,
1411 struct device_attribute *attr, char *buf)
1413 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1414 return sprintf(buf, "%i\n", n);
1416 static ssize_t store_eeprom_delay(struct device *d,
1417 struct device_attribute *attr,
1418 const char *buf, size_t count)
1420 struct ipw_priv *p = d->driver_data;
1421 sscanf(buf, "%i", &p->eeprom_delay);
1422 return strnlen(buf, count);
1425 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1426 show_eeprom_delay, store_eeprom_delay);
1428 static ssize_t show_command_event_reg(struct device *d,
1429 struct device_attribute *attr, char *buf)
1431 u32 reg = 0;
1432 struct ipw_priv *p = d->driver_data;
1434 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1435 return sprintf(buf, "0x%08x\n", reg);
1437 static ssize_t store_command_event_reg(struct device *d,
1438 struct device_attribute *attr,
1439 const char *buf, size_t count)
1441 u32 reg;
1442 struct ipw_priv *p = d->driver_data;
1444 sscanf(buf, "%x", &reg);
1445 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1446 return strnlen(buf, count);
1449 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1450 show_command_event_reg, store_command_event_reg);
1452 static ssize_t show_mem_gpio_reg(struct device *d,
1453 struct device_attribute *attr, char *buf)
1455 u32 reg = 0;
1456 struct ipw_priv *p = d->driver_data;
1458 reg = ipw_read_reg32(p, 0x301100);
1459 return sprintf(buf, "0x%08x\n", reg);
1461 static ssize_t store_mem_gpio_reg(struct device *d,
1462 struct device_attribute *attr,
1463 const char *buf, size_t count)
1465 u32 reg;
1466 struct ipw_priv *p = d->driver_data;
1468 sscanf(buf, "%x", &reg);
1469 ipw_write_reg32(p, 0x301100, reg);
1470 return strnlen(buf, count);
1473 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1474 show_mem_gpio_reg, store_mem_gpio_reg);
1476 static ssize_t show_indirect_dword(struct device *d,
1477 struct device_attribute *attr, char *buf)
1479 u32 reg = 0;
1480 struct ipw_priv *priv = d->driver_data;
1482 if (priv->status & STATUS_INDIRECT_DWORD)
1483 reg = ipw_read_reg32(priv, priv->indirect_dword);
1484 else
1485 reg = 0;
1487 return sprintf(buf, "0x%08x\n", reg);
1489 static ssize_t store_indirect_dword(struct device *d,
1490 struct device_attribute *attr,
1491 const char *buf, size_t count)
1493 struct ipw_priv *priv = d->driver_data;
1495 sscanf(buf, "%x", &priv->indirect_dword);
1496 priv->status |= STATUS_INDIRECT_DWORD;
1497 return strnlen(buf, count);
1500 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1501 show_indirect_dword, store_indirect_dword);
1503 static ssize_t show_indirect_byte(struct device *d,
1504 struct device_attribute *attr, char *buf)
1506 u8 reg = 0;
1507 struct ipw_priv *priv = d->driver_data;
1509 if (priv->status & STATUS_INDIRECT_BYTE)
1510 reg = ipw_read_reg8(priv, priv->indirect_byte);
1511 else
1512 reg = 0;
1514 return sprintf(buf, "0x%02x\n", reg);
1516 static ssize_t store_indirect_byte(struct device *d,
1517 struct device_attribute *attr,
1518 const char *buf, size_t count)
1520 struct ipw_priv *priv = d->driver_data;
1522 sscanf(buf, "%x", &priv->indirect_byte);
1523 priv->status |= STATUS_INDIRECT_BYTE;
1524 return strnlen(buf, count);
1527 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1528 show_indirect_byte, store_indirect_byte);
1530 static ssize_t show_direct_dword(struct device *d,
1531 struct device_attribute *attr, char *buf)
1533 u32 reg = 0;
1534 struct ipw_priv *priv = d->driver_data;
1536 if (priv->status & STATUS_DIRECT_DWORD)
1537 reg = ipw_read32(priv, priv->direct_dword);
1538 else
1539 reg = 0;
1541 return sprintf(buf, "0x%08x\n", reg);
1543 static ssize_t store_direct_dword(struct device *d,
1544 struct device_attribute *attr,
1545 const char *buf, size_t count)
1547 struct ipw_priv *priv = d->driver_data;
1549 sscanf(buf, "%x", &priv->direct_dword);
1550 priv->status |= STATUS_DIRECT_DWORD;
1551 return strnlen(buf, count);
1554 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1555 show_direct_dword, store_direct_dword);
1557 static int rf_kill_active(struct ipw_priv *priv)
1559 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1560 priv->status |= STATUS_RF_KILL_HW;
1561 else
1562 priv->status &= ~STATUS_RF_KILL_HW;
1564 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1567 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1568 char *buf)
1570 /* 0 - RF kill not enabled
1571 1 - SW based RF kill active (sysfs)
1572 2 - HW based RF kill active
1573 3 - Both HW and SW based RF kill active */
1574 struct ipw_priv *priv = d->driver_data;
1575 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1576 (rf_kill_active(priv) ? 0x2 : 0x0);
1577 return sprintf(buf, "%i\n", val);
1580 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1582 if ((disable_radio ? 1 : 0) ==
1583 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1584 return 0;
1586 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1587 disable_radio ? "OFF" : "ON");
1589 if (disable_radio) {
1590 priv->status |= STATUS_RF_KILL_SW;
1592 if (priv->workqueue)
1593 cancel_delayed_work(&priv->request_scan);
1594 queue_work(priv->workqueue, &priv->down);
1595 } else {
1596 priv->status &= ~STATUS_RF_KILL_SW;
1597 if (rf_kill_active(priv)) {
1598 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1599 "disabled by HW switch\n");
1600 /* Make sure the RF_KILL check timer is running */
1601 cancel_delayed_work(&priv->rf_kill);
1602 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1603 2 * HZ);
1604 } else
1605 queue_work(priv->workqueue, &priv->up);
1608 return 1;
1611 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1612 const char *buf, size_t count)
1614 struct ipw_priv *priv = d->driver_data;
1616 ipw_radio_kill_sw(priv, buf[0] == '1');
1618 return count;
1621 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1623 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1624 char *buf)
1626 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1627 int pos = 0, len = 0;
1628 if (priv->config & CFG_SPEED_SCAN) {
1629 while (priv->speed_scan[pos] != 0)
1630 len += sprintf(&buf[len], "%d ",
1631 priv->speed_scan[pos++]);
1632 return len + sprintf(&buf[len], "\n");
1635 return sprintf(buf, "0\n");
1638 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1639 const char *buf, size_t count)
1641 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1642 int channel, pos = 0;
1643 const char *p = buf;
1645 /* list of space separated channels to scan, optionally ending with 0 */
1646 while ((channel = simple_strtol(p, NULL, 0))) {
1647 if (pos == MAX_SPEED_SCAN - 1) {
1648 priv->speed_scan[pos] = 0;
1649 break;
1652 if (ieee80211_is_valid_channel(priv->ieee, channel))
1653 priv->speed_scan[pos++] = channel;
1654 else
1655 IPW_WARNING("Skipping invalid channel request: %d\n",
1656 channel);
1657 p = strchr(p, ' ');
1658 if (!p)
1659 break;
1660 while (*p == ' ' || *p == '\t')
1661 p++;
1664 if (pos == 0)
1665 priv->config &= ~CFG_SPEED_SCAN;
1666 else {
1667 priv->speed_scan_pos = 0;
1668 priv->config |= CFG_SPEED_SCAN;
1671 return count;
1674 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1675 store_speed_scan);
1677 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1678 char *buf)
1680 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1681 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1684 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1685 const char *buf, size_t count)
1687 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1688 if (buf[0] == '1')
1689 priv->config |= CFG_NET_STATS;
1690 else
1691 priv->config &= ~CFG_NET_STATS;
1693 return count;
1696 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1697 show_net_stats, store_net_stats);
1699 static void notify_wx_assoc_event(struct ipw_priv *priv)
1701 union iwreq_data wrqu;
1702 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1703 if (priv->status & STATUS_ASSOCIATED)
1704 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1705 else
1706 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1707 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1710 static void ipw_irq_tasklet(struct ipw_priv *priv)
1712 u32 inta, inta_mask, handled = 0;
1713 unsigned long flags;
1714 int rc = 0;
1716 spin_lock_irqsave(&priv->lock, flags);
1718 inta = ipw_read32(priv, IPW_INTA_RW);
1719 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1720 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1722 /* Add any cached INTA values that need to be handled */
1723 inta |= priv->isr_inta;
1725 /* handle all the justifications for the interrupt */
1726 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1727 ipw_rx(priv);
1728 handled |= IPW_INTA_BIT_RX_TRANSFER;
1731 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1732 IPW_DEBUG_HC("Command completed.\n");
1733 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1734 priv->status &= ~STATUS_HCMD_ACTIVE;
1735 wake_up_interruptible(&priv->wait_command_queue);
1736 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1739 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1740 IPW_DEBUG_TX("TX_QUEUE_1\n");
1741 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1742 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1745 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1746 IPW_DEBUG_TX("TX_QUEUE_2\n");
1747 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1748 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1751 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1752 IPW_DEBUG_TX("TX_QUEUE_3\n");
1753 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1754 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1757 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1758 IPW_DEBUG_TX("TX_QUEUE_4\n");
1759 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1760 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1763 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1764 IPW_WARNING("STATUS_CHANGE\n");
1765 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1768 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1769 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1770 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1773 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1774 IPW_WARNING("HOST_CMD_DONE\n");
1775 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1778 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1779 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1780 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1783 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1784 IPW_WARNING("PHY_OFF_DONE\n");
1785 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1788 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1789 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1790 priv->status |= STATUS_RF_KILL_HW;
1791 wake_up_interruptible(&priv->wait_command_queue);
1792 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1793 cancel_delayed_work(&priv->request_scan);
1794 schedule_work(&priv->link_down);
1795 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1796 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1799 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1800 IPW_WARNING("Firmware error detected. Restarting.\n");
1801 if (priv->error) {
1802 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1803 #ifdef CONFIG_IPW2200_DEBUG
1804 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1805 struct ipw_fw_error *error =
1806 ipw_alloc_error_log(priv);
1807 ipw_dump_error_log(priv, error);
1808 if (error)
1809 ipw_free_error_log(error);
1811 #endif
1812 } else {
1813 priv->error = ipw_alloc_error_log(priv);
1814 if (priv->error)
1815 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1816 else
1817 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1818 "log.\n");
1819 #ifdef CONFIG_IPW2200_DEBUG
1820 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1821 ipw_dump_error_log(priv, priv->error);
1822 #endif
1825 /* XXX: If hardware encryption is for WPA/WPA2,
1826 * we have to notify the supplicant. */
1827 if (priv->ieee->sec.encrypt) {
1828 priv->status &= ~STATUS_ASSOCIATED;
1829 notify_wx_assoc_event(priv);
1832 /* Keep the restart process from trying to send host
1833 * commands by clearing the INIT status bit */
1834 priv->status &= ~STATUS_INIT;
1836 /* Cancel currently queued command. */
1837 priv->status &= ~STATUS_HCMD_ACTIVE;
1838 wake_up_interruptible(&priv->wait_command_queue);
1840 queue_work(priv->workqueue, &priv->adapter_restart);
1841 handled |= IPW_INTA_BIT_FATAL_ERROR;
1844 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1845 IPW_ERROR("Parity error\n");
1846 handled |= IPW_INTA_BIT_PARITY_ERROR;
1849 if (handled != inta) {
1850 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1853 /* enable all interrupts */
1854 ipw_enable_interrupts(priv);
1856 spin_unlock_irqrestore(&priv->lock, flags);
1859 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
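/*
 * IPW_CMD() combines token pasting and stringification, so IPW_CMD(SSID);
 * expands to:  case IPW_CMD_SSID: return "SSID";
 */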
1860 static char *get_cmd_string(u8 cmd)
1862 switch (cmd) {
1863 IPW_CMD(HOST_COMPLETE);
1864 IPW_CMD(POWER_DOWN);
1865 IPW_CMD(SYSTEM_CONFIG);
1866 IPW_CMD(MULTICAST_ADDRESS);
1867 IPW_CMD(SSID);
1868 IPW_CMD(ADAPTER_ADDRESS);
1869 IPW_CMD(PORT_TYPE);
1870 IPW_CMD(RTS_THRESHOLD);
1871 IPW_CMD(FRAG_THRESHOLD);
1872 IPW_CMD(POWER_MODE);
1873 IPW_CMD(WEP_KEY);
1874 IPW_CMD(TGI_TX_KEY);
1875 IPW_CMD(SCAN_REQUEST);
1876 IPW_CMD(SCAN_REQUEST_EXT);
1877 IPW_CMD(ASSOCIATE);
1878 IPW_CMD(SUPPORTED_RATES);
1879 IPW_CMD(SCAN_ABORT);
1880 IPW_CMD(TX_FLUSH);
1881 IPW_CMD(QOS_PARAMETERS);
1882 IPW_CMD(DINO_CONFIG);
1883 IPW_CMD(RSN_CAPABILITIES);
1884 IPW_CMD(RX_KEY);
1885 IPW_CMD(CARD_DISABLE);
1886 IPW_CMD(SEED_NUMBER);
1887 IPW_CMD(TX_POWER);
1888 IPW_CMD(COUNTRY_INFO);
1889 IPW_CMD(AIRONET_INFO);
1890 IPW_CMD(AP_TX_POWER);
1891 IPW_CMD(CCKM_INFO);
1892 IPW_CMD(CCX_VER_INFO);
1893 IPW_CMD(SET_CALIBRATION);
1894 IPW_CMD(SENSITIVITY_CALIB);
1895 IPW_CMD(RETRY_LIMIT);
1896 IPW_CMD(IPW_PRE_POWER_DOWN);
1897 IPW_CMD(VAP_BEACON_TEMPLATE);
1898 IPW_CMD(VAP_DTIM_PERIOD);
1899 IPW_CMD(EXT_SUPPORTED_RATES);
1900 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1901 IPW_CMD(VAP_QUIET_INTERVALS);
1902 IPW_CMD(VAP_CHANNEL_SWITCH);
1903 IPW_CMD(VAP_MANDATORY_CHANNELS);
1904 IPW_CMD(VAP_CELL_PWR_LIMIT);
1905 IPW_CMD(VAP_CF_PARAM_SET);
1906 IPW_CMD(VAP_SET_BEACONING_STATE);
1907 IPW_CMD(MEASUREMENT);
1908 IPW_CMD(POWER_CAPABILITY);
1909 IPW_CMD(SUPPORTED_CHANNELS);
1910 IPW_CMD(TPC_REPORT);
1911 IPW_CMD(WME_INFO);
1912 IPW_CMD(PRODUCTION_COMMAND);
1913 default:
1914 return "UNKNOWN";
1918 #define HOST_COMPLETE_TIMEOUT HZ
1920 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1922 int rc = 0;
1923 unsigned long flags;
1925 spin_lock_irqsave(&priv->lock, flags);
1926 if (priv->status & STATUS_HCMD_ACTIVE) {
1927 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1928 get_cmd_string(cmd->cmd));
1929 spin_unlock_irqrestore(&priv->lock, flags);
1930 return -EAGAIN;
1933 priv->status |= STATUS_HCMD_ACTIVE;
1935 if (priv->cmdlog) {
1936 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1937 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1938 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1939 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1940 cmd->len);
1941 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1944 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1945 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1946 priv->status);
1948 #ifndef DEBUG_CMD_WEP_KEY
1949 if (cmd->cmd == IPW_CMD_WEP_KEY)
1950 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
1951 else
1952 #endif
1953 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1955 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
1956 if (rc) {
1957 priv->status &= ~STATUS_HCMD_ACTIVE;
1958 IPW_ERROR("Failed to send %s: Reason %d\n",
1959 get_cmd_string(cmd->cmd), rc);
1960 spin_unlock_irqrestore(&priv->lock, flags);
1961 goto exit;
1963 spin_unlock_irqrestore(&priv->lock, flags);
1965 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1966 !(priv->status &
1967 STATUS_HCMD_ACTIVE),
1968 HOST_COMPLETE_TIMEOUT);
1969 if (rc == 0) {
1970 spin_lock_irqsave(&priv->lock, flags);
1971 if (priv->status & STATUS_HCMD_ACTIVE) {
1972 IPW_ERROR("Failed to send %s: Command timed out.\n",
1973 get_cmd_string(cmd->cmd));
1974 priv->status &= ~STATUS_HCMD_ACTIVE;
1975 spin_unlock_irqrestore(&priv->lock, flags);
1976 rc = -EIO;
1977 goto exit;
1979 spin_unlock_irqrestore(&priv->lock, flags);
1980 } else
1981 rc = 0;
1983 if (priv->status & STATUS_RF_KILL_HW) {
1984 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1985 get_cmd_string(cmd->cmd));
1986 rc = -EIO;
1987 goto exit;
1990 exit:
1991 if (priv->cmdlog) {
1992 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
1993 priv->cmdlog_pos %= priv->cmdlog_len;
1995 return rc;
1998 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2000 struct host_cmd cmd = {
2001 .cmd = command,
2004 return __ipw_send_cmd(priv, &cmd);
2007 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2008 void *data)
2010 struct host_cmd cmd = {
2011 .cmd = command,
2012 .len = len,
2013 .param = data,
2016 return __ipw_send_cmd(priv, &cmd);
2019 static int ipw_send_host_complete(struct ipw_priv *priv)
2021 if (!priv) {
2022 IPW_ERROR("Invalid args\n");
2023 return -1;
2026 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2029 static int ipw_send_system_config(struct ipw_priv *priv,
2030 struct ipw_sys_config *config)
2032 if (!priv || !config) {
2033 IPW_ERROR("Invalid args\n");
2034 return -1;
2037 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
2038 config);
2041 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2043 if (!priv || !ssid) {
2044 IPW_ERROR("Invalid args\n");
2045 return -1;
2048 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2049 ssid);
2052 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2054 if (!priv || !mac) {
2055 IPW_ERROR("Invalid args\n");
2056 return -1;
2059 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2060 priv->net_dev->name, MAC_ARG(mac));
2062 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2066 * NOTE: This must be executed from our workqueue as it results in udelay
2067 * being called, which may corrupt the keyboard if executed on the default
2068 * workqueue
2070 static void ipw_adapter_restart(void *adapter)
2072 struct ipw_priv *priv = adapter;
2074 if (priv->status & STATUS_RF_KILL_MASK)
2075 return;
2077 ipw_down(priv);
2079 if (priv->assoc_network &&
2080 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2081 ipw_remove_current_network(priv);
2083 if (ipw_up(priv)) {
2084 IPW_ERROR("Failed to up device\n");
2085 return;
2089 static void ipw_bg_adapter_restart(void *data)
2091 struct ipw_priv *priv = data;
2092 mutex_lock(&priv->mutex);
2093 ipw_adapter_restart(data);
2094 mutex_unlock(&priv->mutex);
2097 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2099 static void ipw_scan_check(void *data)
2101 struct ipw_priv *priv = data;
2102 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2103 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2104 "adapter after (%dms).\n",
2105 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2106 queue_work(priv->workqueue, &priv->adapter_restart);
2110 static void ipw_bg_scan_check(void *data)
2112 struct ipw_priv *priv = data;
2113 mutex_lock(&priv->mutex);
2114 ipw_scan_check(data);
2115 mutex_unlock(&priv->mutex);
2118 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2119 struct ipw_scan_request_ext *request)
2121 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2122 sizeof(*request), request);
2125 static int ipw_send_scan_abort(struct ipw_priv *priv)
2127 if (!priv) {
2128 IPW_ERROR("Invalid args\n");
2129 return -1;
2132 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2135 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2137 struct ipw_sensitivity_calib calib = {
2138 .beacon_rssi_raw = sens,
2141 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2142 &calib);
2145 static int ipw_send_associate(struct ipw_priv *priv,
2146 struct ipw_associate *associate)
2148 struct ipw_associate tmp_associate;
2150 if (!priv || !associate) {
2151 IPW_ERROR("Invalid args\n");
2152 return -1;
2155 memcpy(&tmp_associate, associate, sizeof(*associate));
2156 tmp_associate.policy_support =
2157 cpu_to_le16(tmp_associate.policy_support);
2158 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2159 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2160 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2161 tmp_associate.listen_interval =
2162 cpu_to_le16(tmp_associate.listen_interval);
2163 tmp_associate.beacon_interval =
2164 cpu_to_le16(tmp_associate.beacon_interval);
2165 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2167 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2168 &tmp_associate);
2171 static int ipw_send_supported_rates(struct ipw_priv *priv,
2172 struct ipw_supported_rates *rates)
2174 if (!priv || !rates) {
2175 IPW_ERROR("Invalid args\n");
2176 return -1;
2179 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2180 rates);
2183 static int ipw_set_random_seed(struct ipw_priv *priv)
2185 u32 val;
2187 if (!priv) {
2188 IPW_ERROR("Invalid args\n");
2189 return -1;
2192 get_random_bytes(&val, sizeof(val));
2194 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2197 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2199 if (!priv) {
2200 IPW_ERROR("Invalid args\n");
2201 return -1;
2204 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2205 &phy_off);
2208 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2210 if (!priv || !power) {
2211 IPW_ERROR("Invalid args\n");
2212 return -1;
2215 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2218 static int ipw_set_tx_power(struct ipw_priv *priv)
2220 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2221 struct ipw_tx_power tx_power;
2222 s8 max_power;
2223 int i;
2225 memset(&tx_power, 0, sizeof(tx_power));
2227 /* configure device for 'G' band */
2228 tx_power.ieee_mode = IPW_G_MODE;
2229 tx_power.num_channels = geo->bg_channels;
2230 for (i = 0; i < geo->bg_channels; i++) {
2231 max_power = geo->bg[i].max_power;
2232 tx_power.channels_tx_power[i].channel_number =
2233 geo->bg[i].channel;
2234 tx_power.channels_tx_power[i].tx_power = max_power ?
2235 min(max_power, priv->tx_power) : priv->tx_power;
2237 if (ipw_send_tx_power(priv, &tx_power))
2238 return -EIO;
2240 /* configure device to also handle 'B' band */
2241 tx_power.ieee_mode = IPW_B_MODE;
2242 if (ipw_send_tx_power(priv, &tx_power))
2243 return -EIO;
2245 /* configure device to also handle 'A' band */
2246 if (priv->ieee->abg_true) {
2247 tx_power.ieee_mode = IPW_A_MODE;
2248 tx_power.num_channels = geo->a_channels;
2249 for (i = 0; i < tx_power.num_channels; i++) {
2250 max_power = geo->a[i].max_power;
2251 tx_power.channels_tx_power[i].channel_number =
2252 geo->a[i].channel;
2253 tx_power.channels_tx_power[i].tx_power = max_power ?
2254 min(max_power, priv->tx_power) : priv->tx_power;
2256 if (ipw_send_tx_power(priv, &tx_power))
2257 return -EIO;
2259 return 0;
2262 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2264 struct ipw_rts_threshold rts_threshold = {
2265 .rts_threshold = rts,
2268 if (!priv) {
2269 IPW_ERROR("Invalid args\n");
2270 return -1;
2273 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2274 sizeof(rts_threshold), &rts_threshold);
2277 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2279 struct ipw_frag_threshold frag_threshold = {
2280 .frag_threshold = frag,
2283 if (!priv) {
2284 IPW_ERROR("Invalid args\n");
2285 return -1;
2288 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2289 sizeof(frag_threshold), &frag_threshold);
2292 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2294 u32 param;
2296 if (!priv) {
2297 IPW_ERROR("Invalid args\n");
2298 return -1;
2301 /* If on battery, set to power index 3; if on AC, set to CAM;
2302 * otherwise use the user-supplied level */
2303 switch (mode) {
2304 case IPW_POWER_BATTERY:
2305 param = IPW_POWER_INDEX_3;
2306 break;
2307 case IPW_POWER_AC:
2308 param = IPW_POWER_MODE_CAM;
2309 break;
2310 default:
2311 param = mode;
2312 break;
2315 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2316 &param);
2319 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2321 struct ipw_retry_limit retry_limit = {
2322 .short_retry_limit = slimit,
2323 .long_retry_limit = llimit
2326 if (!priv) {
2327 IPW_ERROR("Invalid args\n");
2328 return -1;
2331 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2332 &retry_limit);
2336 * The IPW device contains a Microwire compatible EEPROM that stores
2337 * various data like the MAC address. Usually the firmware has exclusive
2338 * access to the eeprom, but during device initialization (before the
2339 * device driver has sent the HostComplete command to the firmware) the
2340 * device driver has read access to the EEPROM by way of indirect addressing
2341 * through a couple of memory mapped registers.
2343 * The following is a simplified implementation for pulling data out of
2344 * the eeprom, along with some helper functions to find information in
2345 * the per device private data's copy of the eeprom.
2347 * NOTE: To better understand how these functions work (i.e. what is a chip
2348 * select and why do we have to keep driving the eeprom clock?), read
2349 * just about any data sheet for a Microwire compatible EEPROM.
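/*
 * Illustrative sketch only (not part of the driver build): a hypothetical
 * helper showing how the bit-banging primitives below fit together to read
 * a few EEPROM words.  The driver itself reads the complete image in
 * ipw_eeprom_init_sram().
 */
#if 0
static void eeprom_dump_example(struct ipw_priv *priv)
{
	int i;

	/* each eeprom_read_u16() call performs a full Microwire READ cycle:
	 * chip select, start bit, READ opcode, 8 address bits, then 16
	 * clocked-out data bits sampled from EEPROM_BIT_DO */
	for (i = 0; i < 8; i++)
		IPW_DEBUG_INFO("eeprom[0x%02x] = 0x%04x\n", i,
			       eeprom_read_u16(priv, (u8) i));
}
#endif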
2352 /* write a 32 bit value into the indirect accessor register */
2353 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2355 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2357 /* the eeprom requires some time to complete the operation */
2358 udelay(p->eeprom_delay);
2360 return;
2363 /* perform a chip select operation */
2364 static void eeprom_cs(struct ipw_priv *priv)
2366 eeprom_write_reg(priv, 0);
2367 eeprom_write_reg(priv, EEPROM_BIT_CS);
2368 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2369 eeprom_write_reg(priv, EEPROM_BIT_CS);
2372 /* perform a chip select operation */
2373 static void eeprom_disable_cs(struct ipw_priv *priv)
2375 eeprom_write_reg(priv, EEPROM_BIT_CS);
2376 eeprom_write_reg(priv, 0);
2377 eeprom_write_reg(priv, EEPROM_BIT_SK);
2380 /* push a single bit down to the eeprom */
2381 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2383 int d = (bit ? EEPROM_BIT_DI : 0);
2384 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2385 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2388 /* push an opcode followed by an address down to the eeprom */
2389 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2391 int i;
2393 eeprom_cs(priv);
2394 eeprom_write_bit(priv, 1);
2395 eeprom_write_bit(priv, op & 2);
2396 eeprom_write_bit(priv, op & 1);
2397 for (i = 7; i >= 0; i--) {
2398 eeprom_write_bit(priv, addr & (1 << i));
2402 /* pull 16 bits off the eeprom, one bit at a time */
2403 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2405 int i;
2406 u16 r = 0;
2408 /* Send READ Opcode */
2409 eeprom_op(priv, EEPROM_CMD_READ, addr);
2411 /* Send dummy bit */
2412 eeprom_write_reg(priv, EEPROM_BIT_CS);
2414 /* Read the 16-bit word off the eeprom one bit at a time */
2415 for (i = 0; i < 16; i++) {
2416 u32 data = 0;
2417 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2418 eeprom_write_reg(priv, EEPROM_BIT_CS);
2419 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2420 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2423 /* Send another dummy bit */
2424 eeprom_write_reg(priv, 0);
2425 eeprom_disable_cs(priv);
2427 return r;
2430 /* helper function for pulling the mac address out of the private */
2431 /* data's copy of the eeprom data */
2432 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2434 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2438 * Either the device driver (i.e. the host) or the firmware can
2439 * load eeprom data into the designated region in SRAM. If neither
2440 * happens then the FW will shut down with a fatal error.
2442 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2443 * bit in the shared SRAM region needs to be non-zero.
2445 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2447 int i;
2448 u16 *eeprom = (u16 *) priv->eeprom;
2450 IPW_DEBUG_TRACE(">>\n");
2452 /* read entire contents of eeprom into private buffer */
2453 for (i = 0; i < 128; i++)
2454 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2457 If the data looks correct, then copy it to our private
2458 copy. Otherwise let the firmware know to perform the operation
2459 on its own.
2461 if (priv->eeprom[EEPROM_VERSION] != 0) {
2462 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2464 /* write the eeprom data to sram */
2465 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2466 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2468 /* Do not load eeprom data on fatal error or suspend */
2469 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2470 } else {
2471 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2473 /* Load eeprom data on fatal error or suspend */
2474 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2477 IPW_DEBUG_TRACE("<<\n");
2480 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2482 count >>= 2;
2483 if (!count)
2484 return;
2485 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2486 while (count--)
2487 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2490 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2492 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2493 CB_NUMBER_OF_ELEMENTS_SMALL *
2494 sizeof(struct command_block));
2497 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2498 { /* start dma engine but no transfers yet */
2500 IPW_DEBUG_FW(">> : \n");
2502 /* Start the dma */
2503 ipw_fw_dma_reset_command_blocks(priv);
2505 /* Write CB base address */
2506 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2508 IPW_DEBUG_FW("<< : \n");
2509 return 0;
2512 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2514 u32 control = 0;
2516 IPW_DEBUG_FW(">> :\n");
2518 //set the Stop and Abort bit
2519 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2520 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2521 priv->sram_desc.last_cb_index = 0;
2523 IPW_DEBUG_FW("<< \n");
2526 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2527 struct command_block *cb)
2529 u32 address =
2530 IPW_SHARED_SRAM_DMA_CONTROL +
2531 (sizeof(struct command_block) * index);
2532 IPW_DEBUG_FW(">> :\n");
2534 ipw_write_indirect(priv, address, (u8 *) cb,
2535 (int)sizeof(struct command_block));
2537 IPW_DEBUG_FW("<< :\n");
2538 return 0;
2542 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2544 u32 control = 0;
2545 u32 index = 0;
2547 IPW_DEBUG_FW(">> :\n");
2549 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2550 ipw_fw_dma_write_command_block(priv, index,
2551 &priv->sram_desc.cb_list[index]);
2553 /* Enable the DMA in the CSR register */
2554 ipw_clear_bit(priv, IPW_RESET_REG,
2555 IPW_RESET_REG_MASTER_DISABLED |
2556 IPW_RESET_REG_STOP_MASTER);
2558 /* Set the Start bit. */
2559 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2560 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2562 IPW_DEBUG_FW("<< :\n");
2563 return 0;
2566 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2568 u32 address;
2569 u32 register_value = 0;
2570 u32 cb_fields_address = 0;
2572 IPW_DEBUG_FW(">> :\n");
2573 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2574 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2576 /* Read the DMA Control register */
2577 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2578 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2580 /* Print the CB values */
2581 cb_fields_address = address;
2582 register_value = ipw_read_reg32(priv, cb_fields_address);
2583 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2585 cb_fields_address += sizeof(u32);
2586 register_value = ipw_read_reg32(priv, cb_fields_address);
2587 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2589 cb_fields_address += sizeof(u32);
2590 register_value = ipw_read_reg32(priv, cb_fields_address);
2591 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2592 register_value);
2594 cb_fields_address += sizeof(u32);
2595 register_value = ipw_read_reg32(priv, cb_fields_address);
2596 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2598 IPW_DEBUG_FW("<< :\n");
2601 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2603 u32 current_cb_address = 0;
2604 u32 current_cb_index = 0;
2606 IPW_DEBUG_FW(">> :\n");
2607 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2609 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2610 sizeof(struct command_block);
2612 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2613 current_cb_index, current_cb_address);
2615 IPW_DEBUG_FW("<< :\n");
2616 return current_cb_index;
2620 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2621 u32 src_address,
2622 u32 dest_address,
2623 u32 length,
2624 int interrupt_enabled, int is_last)
2627 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2628 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2629 CB_DEST_SIZE_LONG;
2630 struct command_block *cb;
2631 u32 last_cb_element = 0;
2633 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2634 src_address, dest_address, length);
2636 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2637 return -1;
2639 last_cb_element = priv->sram_desc.last_cb_index;
2640 cb = &priv->sram_desc.cb_list[last_cb_element];
2641 priv->sram_desc.last_cb_index++;
2643 /* Calculate the new CB control word */
2644 if (interrupt_enabled)
2645 control |= CB_INT_ENABLED;
2647 if (is_last)
2648 control |= CB_LAST_VALID;
2650 control |= length;
2652 /* Calculate the CB Element's checksum value */
2653 cb->status = control ^ src_address ^ dest_address;
2655 /* Copy the Source and Destination addresses */
2656 cb->dest_addr = dest_address;
2657 cb->source_addr = src_address;
2659 /* Copy the Control Word last */
2660 cb->control = control;
2662 return 0;
2665 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2666 u32 src_phys, u32 dest_address, u32 length)
2668 u32 bytes_left = length;
2669 u32 src_offset = 0;
2670 u32 dest_offset = 0;
2671 int status = 0;
2672 IPW_DEBUG_FW(">> \n");
2673 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2674 src_phys, dest_address, length);
2675 while (bytes_left > CB_MAX_LENGTH) {
2676 status = ipw_fw_dma_add_command_block(priv,
2677 src_phys + src_offset,
2678 dest_address +
2679 dest_offset,
2680 CB_MAX_LENGTH, 0, 0);
2681 if (status) {
2682 IPW_DEBUG_FW_INFO(": Failed\n");
2683 return -1;
2684 } else
2685 IPW_DEBUG_FW_INFO(": Added new cb\n");
2687 src_offset += CB_MAX_LENGTH;
2688 dest_offset += CB_MAX_LENGTH;
2689 bytes_left -= CB_MAX_LENGTH;
2692 /* add the buffer tail */
2693 if (bytes_left > 0) {
2694 status =
2695 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2696 dest_address + dest_offset,
2697 bytes_left, 0, 0);
2698 if (status) {
2699 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2700 return -1;
2701 } else
2702 IPW_DEBUG_FW_INFO
2703 (": Adding new cb - the buffer tail\n");
2706 IPW_DEBUG_FW("<< \n");
2707 return 0;
2710 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2712 u32 current_index = 0, previous_index;
2713 u32 watchdog = 0;
2715 IPW_DEBUG_FW(">> : \n");
2717 current_index = ipw_fw_dma_command_block_index(priv);
2718 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2719 (int)priv->sram_desc.last_cb_index);
2721 while (current_index < priv->sram_desc.last_cb_index) {
2722 udelay(50);
2723 previous_index = current_index;
2724 current_index = ipw_fw_dma_command_block_index(priv);
2726 if (previous_index < current_index) {
2727 watchdog = 0;
2728 continue;
2730 if (++watchdog > 400) {
2731 IPW_DEBUG_FW_INFO("Timeout\n");
2732 ipw_fw_dma_dump_command_block(priv);
2733 ipw_fw_dma_abort(priv);
2734 return -1;
2738 ipw_fw_dma_abort(priv);
2740 /*Disable the DMA in the CSR register */
2741 ipw_set_bit(priv, IPW_RESET_REG,
2742 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2744 IPW_DEBUG_FW("<< dmaWaitSync \n");
2745 return 0;
2748 static void ipw_remove_current_network(struct ipw_priv *priv)
2750 struct list_head *element, *safe;
2751 struct ieee80211_network *network = NULL;
2752 unsigned long flags;
2754 spin_lock_irqsave(&priv->ieee->lock, flags);
2755 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2756 network = list_entry(element, struct ieee80211_network, list);
2757 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2758 list_del(element);
2759 list_add_tail(&network->list,
2760 &priv->ieee->network_free_list);
2763 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2767 * Check that card is still alive.
2768 * Reads debug register from domain0.
2769 * If card is present, pre-defined value should
2770 * be found there.
2772 * @param priv
2773 * @return 1 if card is present, 0 otherwise
2775 static inline int ipw_alive(struct ipw_priv *priv)
2777 return ipw_read32(priv, 0x90) == 0xd55555d5;
2780 /* timeout in msec, attempted in 10-msec quanta */
2781 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2782 int timeout)
2784 int i = 0;
2786 do {
2787 if ((ipw_read32(priv, addr) & mask) == mask)
2788 return i;
2789 mdelay(10);
2790 i += 10;
2791 } while (i < timeout);
2793 return -ETIME;
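/* Note: ipw_poll_bit() returns the elapsed time in msec on success (so 0 is
 * a valid success value) and -ETIME on timeout; ipw_stop_master() below is a
 * typical caller that only tests for a negative return. */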
2796 /* These functions load the firmware and microcode needed to operate the
2797 * ipw hardware.  They assume the buffer contains the complete image and
2798 * that the caller handles memory allocation and cleanup.
2801 static int ipw_stop_master(struct ipw_priv *priv)
2803 int rc;
2805 IPW_DEBUG_TRACE(">> \n");
2806 /* stop master. typical delay - 0 */
2807 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2809 /* timeout is in msec, polled in 10-msec quanta */
2810 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2811 IPW_RESET_REG_MASTER_DISABLED, 100);
2812 if (rc < 0) {
2813 IPW_ERROR("wait for stop master failed after 100ms\n");
2814 return -1;
2817 IPW_DEBUG_INFO("stop master %dms\n", rc);
2819 return rc;
2822 static void ipw_arc_release(struct ipw_priv *priv)
2824 IPW_DEBUG_TRACE(">> \n");
2825 mdelay(5);
2827 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2829 /* the exact timing is unknown; add some delay to be safe */
2830 mdelay(5);
2833 struct fw_chunk {
2834 u32 address;
2835 u32 length;
2838 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2840 int rc = 0, i, addr;
2841 u8 cr = 0;
2842 u16 *image;
2844 image = (u16 *) data;
2846 IPW_DEBUG_TRACE(">> \n");
2848 rc = ipw_stop_master(priv);
2850 if (rc < 0)
2851 return rc;
2853 // spin_lock_irqsave(&priv->lock, flags);
2855 for (addr = IPW_SHARED_LOWER_BOUND;
2856 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2857 ipw_write32(priv, addr, 0);
2860 /* no ucode (yet) */
2861 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2862 /* destroy DMA queues */
2863 /* reset sequence */
2865 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2866 ipw_arc_release(priv);
2867 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2868 mdelay(1);
2870 /* reset PHY */
2871 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2872 mdelay(1);
2874 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2875 mdelay(1);
2877 /* enable ucode store */
2878 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2879 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2880 mdelay(1);
2882 /* write ucode */
2884 * @bug
2885 * Do NOT set indirect address register once and then
2886 * store data to indirect data register in the loop.
2887 * It seems very reasonable, but in this case DINO do not
2888 * accept ucode. It is essential to set address each time.
2890 /* load new ipw uCode */
2891 for (i = 0; i < len / 2; i++)
2892 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2893 cpu_to_le16(image[i]));
2895 /* enable DINO */
2896 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2897 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2899 /* this is where the igx / win driver deviates from the VAP driver. */
2901 /* wait for alive response */
2902 for (i = 0; i < 100; i++) {
2903 /* poll for incoming data */
2904 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2905 if (cr & DINO_RXFIFO_DATA)
2906 break;
2907 mdelay(1);
2910 if (cr & DINO_RXFIFO_DATA) {
2911 /* alive_command_response size is NOT a multiple of 4 */
2912 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2914 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2915 response_buffer[i] =
2916 le32_to_cpu(ipw_read_reg32(priv,
2917 IPW_BASEBAND_RX_FIFO_READ));
2918 memcpy(&priv->dino_alive, response_buffer,
2919 sizeof(priv->dino_alive));
2920 if (priv->dino_alive.alive_command == 1
2921 && priv->dino_alive.ucode_valid == 1) {
2922 rc = 0;
2923 IPW_DEBUG_INFO
2924 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2925 "of %02d/%02d/%02d %02d:%02d\n",
2926 priv->dino_alive.software_revision,
2927 priv->dino_alive.software_revision,
2928 priv->dino_alive.device_identifier,
2929 priv->dino_alive.device_identifier,
2930 priv->dino_alive.time_stamp[0],
2931 priv->dino_alive.time_stamp[1],
2932 priv->dino_alive.time_stamp[2],
2933 priv->dino_alive.time_stamp[3],
2934 priv->dino_alive.time_stamp[4]);
2935 } else {
2936 IPW_DEBUG_INFO("Microcode is not alive\n");
2937 rc = -EINVAL;
2939 } else {
2940 IPW_DEBUG_INFO("No alive response from DINO\n");
2941 rc = -ETIME;
2944 /* disable DINO, otherwise for some reason
2945 the firmware has problems getting an alive response. */
2946 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2948 // spin_unlock_irqrestore(&priv->lock, flags);
2950 return rc;
2953 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2955 int rc = -1;
2956 int offset = 0;
2957 struct fw_chunk *chunk;
2958 dma_addr_t shared_phys;
2959 u8 *shared_virt;
2961 IPW_DEBUG_TRACE(">> : \n");
2962 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2964 if (!shared_virt)
2965 return -ENOMEM;
2967 memmove(shared_virt, data, len);
2969 /* Start the Dma */
2970 rc = ipw_fw_dma_enable(priv);
2972 if (priv->sram_desc.last_cb_index > 0) {
2973 /* the DMA is already in use; this would be a bug. */
2974 BUG();
2975 goto out;
2978 do {
2979 chunk = (struct fw_chunk *)(data + offset);
2980 offset += sizeof(struct fw_chunk);
2981 /* build DMA packet and queue up for sending */
2982 /* dma to chunk->address, the chunk->length bytes from data +
2983 * offset */
2984 /* Dma loading */
2985 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2986 le32_to_cpu(chunk->address),
2987 le32_to_cpu(chunk->length));
2988 if (rc) {
2989 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2990 goto out;
2993 offset += le32_to_cpu(chunk->length);
2994 } while (offset < len);
2996 /* Run the DMA and wait for the answer */
2997 rc = ipw_fw_dma_kick(priv);
2998 if (rc) {
2999 IPW_ERROR("dmaKick Failed\n");
3000 goto out;
3003 rc = ipw_fw_dma_wait(priv);
3004 if (rc) {
3005 IPW_ERROR("dmaWaitSync Failed\n");
3006 goto out;
3008 out:
3009 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3010 return rc;
3013 /* stop nic */
3014 static int ipw_stop_nic(struct ipw_priv *priv)
3016 int rc = 0;
3018 /* stop */
3019 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3021 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3022 IPW_RESET_REG_MASTER_DISABLED, 500);
3023 if (rc < 0) {
3024 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3025 return rc;
3028 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3030 return rc;
3033 static void ipw_start_nic(struct ipw_priv *priv)
3035 IPW_DEBUG_TRACE(">>\n");
3037 /* prvHwStartNic release ARC */
3038 ipw_clear_bit(priv, IPW_RESET_REG,
3039 IPW_RESET_REG_MASTER_DISABLED |
3040 IPW_RESET_REG_STOP_MASTER |
3041 CBD_RESET_REG_PRINCETON_RESET);
3043 /* enable power management */
3044 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3045 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3047 IPW_DEBUG_TRACE("<<\n");
3050 static int ipw_init_nic(struct ipw_priv *priv)
3052 int rc;
3054 IPW_DEBUG_TRACE(">>\n");
3055 /* reset */
3056 /*prvHwInitNic */
3057 /* set "initialization complete" bit to move adapter to D0 state */
3058 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3060 /* low-level PLL activation */
3061 ipw_write32(priv, IPW_READ_INT_REGISTER,
3062 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3064 /* wait for clock stabilization */
3065 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3066 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3067 if (rc < 0)
3068 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3070 /* assert SW reset */
3071 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3073 udelay(10);
3075 /* set "initialization complete" bit to move adapter to D0 state */
3076 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3078 IPW_DEBUG_TRACE("<<\n");
3079 return 0;
3082 /* Call this function from process context, it will sleep in request_firmware.
3083 * Probe is an ok place to call this from.
3085 static int ipw_reset_nic(struct ipw_priv *priv)
3087 int rc = 0;
3088 unsigned long flags;
3090 IPW_DEBUG_TRACE(">>\n");
3092 rc = ipw_init_nic(priv);
3094 spin_lock_irqsave(&priv->lock, flags);
3095 /* Clear the 'host command active' bit... */
3096 priv->status &= ~STATUS_HCMD_ACTIVE;
3097 wake_up_interruptible(&priv->wait_command_queue);
3098 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3099 wake_up_interruptible(&priv->wait_state);
3100 spin_unlock_irqrestore(&priv->lock, flags);
3102 IPW_DEBUG_TRACE("<<\n");
3103 return rc;
3107 struct ipw_fw {
3108 u32 ver;
3109 u32 boot_size;
3110 u32 ucode_size;
3111 u32 fw_size;
3112 u8 data[0];
3115 static int ipw_get_fw(struct ipw_priv *priv,
3116 const struct firmware **raw, const char *name)
3118 struct ipw_fw *fw;
3119 int rc;
3121 /* ask firmware_class module to get the boot firmware off disk */
3122 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3123 if (rc < 0) {
3124 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3125 return rc;
3128 if ((*raw)->size < sizeof(*fw)) {
3129 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3130 return -EINVAL;
3133 fw = (void *)(*raw)->data;
3135 if ((*raw)->size < sizeof(*fw) +
3136 fw->boot_size + fw->ucode_size + fw->fw_size) {
3137 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3138 name, (*raw)->size);
3139 return -EINVAL;
3142 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3143 name,
3144 le32_to_cpu(fw->ver) >> 16,
3145 le32_to_cpu(fw->ver) & 0xff,
3146 (*raw)->size - sizeof(*fw));
3147 return 0;
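/*
 * Sketch of the combined image layout, as sliced later by ipw_load()
 * (illustrative only; offsets follow directly from struct ipw_fw above):
 *
 *   fw->data[0]                               boot image,   fw->boot_size bytes
 *   fw->data[fw->boot_size]                   ucode image,  fw->ucode_size bytes
 *   fw->data[fw->boot_size + fw->ucode_size]  runtime fw,   fw->fw_size bytes
 */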
3150 #define IPW_RX_BUF_SIZE (3000)
3152 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3153 struct ipw_rx_queue *rxq)
3155 unsigned long flags;
3156 int i;
3158 spin_lock_irqsave(&rxq->lock, flags);
3160 INIT_LIST_HEAD(&rxq->rx_free);
3161 INIT_LIST_HEAD(&rxq->rx_used);
3163 /* Fill the rx_used queue with _all_ of the Rx buffers */
3164 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3165 /* In the reset function, these buffers may have been allocated
3166 * to an SKB, so we need to unmap and free potential storage */
3167 if (rxq->pool[i].skb != NULL) {
3168 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3169 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3170 dev_kfree_skb(rxq->pool[i].skb);
3171 rxq->pool[i].skb = NULL;
3173 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3176 /* Set us so that we have processed and used all buffers, but have
3177 * not restocked the Rx queue with fresh buffers */
3178 rxq->read = rxq->write = 0;
3179 rxq->processed = RX_QUEUE_SIZE - 1;
3180 rxq->free_count = 0;
3181 spin_unlock_irqrestore(&rxq->lock, flags);
3184 #ifdef CONFIG_PM
3185 static int fw_loaded = 0;
3186 static const struct firmware *raw = NULL;
3188 static void free_firmware(void)
3190 if (fw_loaded) {
3191 release_firmware(raw);
3192 raw = NULL;
3193 fw_loaded = 0;
3196 #else
3197 #define free_firmware() do {} while (0)
3198 #endif
3200 static int ipw_load(struct ipw_priv *priv)
3202 #ifndef CONFIG_PM
3203 const struct firmware *raw = NULL;
3204 #endif
3205 struct ipw_fw *fw;
3206 u8 *boot_img, *ucode_img, *fw_img;
3207 u8 *name = NULL;
3208 int rc = 0, retries = 3;
3210 switch (priv->ieee->iw_mode) {
3211 case IW_MODE_ADHOC:
3212 name = "ipw2200-ibss.fw";
3213 break;
3214 #ifdef CONFIG_IPW2200_MONITOR
3215 case IW_MODE_MONITOR:
3216 name = "ipw2200-sniffer.fw";
3217 break;
3218 #endif
3219 case IW_MODE_INFRA:
3220 name = "ipw2200-bss.fw";
3221 break;
3224 if (!name) {
3225 rc = -EINVAL;
3226 goto error;
3229 #ifdef CONFIG_PM
3230 if (!fw_loaded) {
3231 #endif
3232 rc = ipw_get_fw(priv, &raw, name);
3233 if (rc < 0)
3234 goto error;
3235 #ifdef CONFIG_PM
3237 #endif
3239 fw = (void *)raw->data;
3240 boot_img = &fw->data[0];
3241 ucode_img = &fw->data[fw->boot_size];
3242 fw_img = &fw->data[fw->boot_size + fw->ucode_size];
3244 if (rc < 0)
3245 goto error;
3247 if (!priv->rxq)
3248 priv->rxq = ipw_rx_queue_alloc(priv);
3249 else
3250 ipw_rx_queue_reset(priv, priv->rxq);
3251 if (!priv->rxq) {
3252 IPW_ERROR("Unable to initialize Rx queue\n");
3253 goto error;
3256 retry:
3257 /* Ensure interrupts are disabled */
3258 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3259 priv->status &= ~STATUS_INT_ENABLED;
3261 /* ack pending interrupts */
3262 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3264 ipw_stop_nic(priv);
3266 rc = ipw_reset_nic(priv);
3267 if (rc < 0) {
3268 IPW_ERROR("Unable to reset NIC\n");
3269 goto error;
3272 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3273 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3275 /* DMA the initial boot firmware into the device */
3276 rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
3277 if (rc < 0) {
3278 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3279 goto error;
3282 /* kick start the device */
3283 ipw_start_nic(priv);
3285 /* wait for the device to finish its initial startup sequence */
3286 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3287 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3288 if (rc < 0) {
3289 IPW_ERROR("device failed to boot initial fw image\n");
3290 goto error;
3292 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3294 /* ack fw init done interrupt */
3295 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3297 /* DMA the ucode into the device */
3298 rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
3299 if (rc < 0) {
3300 IPW_ERROR("Unable to load ucode: %d\n", rc);
3301 goto error;
3304 /* stop nic */
3305 ipw_stop_nic(priv);
3307 /* DMA bss firmware into the device */
3308 rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
3309 if (rc < 0) {
3310 IPW_ERROR("Unable to load firmware: %d\n", rc);
3311 goto error;
3313 #ifdef CONFIG_PM
3314 fw_loaded = 1;
3315 #endif
3317 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3319 rc = ipw_queue_reset(priv);
3320 if (rc < 0) {
3321 IPW_ERROR("Unable to initialize queues\n");
3322 goto error;
3325 /* Ensure interrupts are disabled */
3326 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3327 /* ack pending interrupts */
3328 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3330 /* kick start the device */
3331 ipw_start_nic(priv);
3333 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3334 if (retries > 0) {
3335 IPW_WARNING("Parity error. Retrying init.\n");
3336 retries--;
3337 goto retry;
3340 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3341 rc = -EIO;
3342 goto error;
3345 /* wait for the device */
3346 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3347 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3348 if (rc < 0) {
3349 IPW_ERROR("device failed to start within 500ms\n");
3350 goto error;
3352 IPW_DEBUG_INFO("device response after %dms\n", rc);
3354 /* ack fw init done interrupt */
3355 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3357 /* read eeprom data and initialize the eeprom region of sram */
3358 priv->eeprom_delay = 1;
3359 ipw_eeprom_init_sram(priv);
3361 /* enable interrupts */
3362 ipw_enable_interrupts(priv);
3364 /* Ensure our queue has valid packets */
3365 ipw_rx_queue_replenish(priv);
3367 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3369 /* ack pending interrupts */
3370 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3372 #ifndef CONFIG_PM
3373 release_firmware(raw);
3374 #endif
3375 return 0;
3377 error:
3378 if (priv->rxq) {
3379 ipw_rx_queue_free(priv, priv->rxq);
3380 priv->rxq = NULL;
3382 ipw_tx_queue_free(priv);
3383 if (raw)
3384 release_firmware(raw);
3385 #ifdef CONFIG_PM
3386 fw_loaded = 0;
3387 raw = NULL;
3388 #endif
3390 return rc;
3394 * DMA services
3396 * Theory of operation
3398 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3399 * Two empty entries are always kept in the buffer to protect from overflow.
3401 * For the Tx queues there are low mark and high mark limits.  If, after
3402 * queuing a packet for Tx, the free space drops below the low mark, the Tx
3403 * queue is stopped.  When packets are reclaimed (on the 'tx done' IRQ) and
3404 * the free space rises above the high mark, the Tx queue is resumed.
3406 * The IPW operates with six queues, one receive queue in the device's
3407 * sram, one transmit queue for sending commands to the device firmware,
3408 * and four transmit queues for data.
3410 * The four transmit queues allow for performing quality of service (qos)
3411 * transmissions as per the 802.11 protocol. Currently Linux does not
3412 * provide a mechanism to the user for utilizing prioritized queues, so
3413 * we only utilize the first data transmit queue (queue1).
3417 * Driver allocates buffers of this size for Rx
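/*
 * Worked example (illustrative): with n_bd = 64, last_used = 10 and
 * first_empty = 60, ipw_queue_space() below computes 10 - 60 = -50, wraps
 * to -50 + 64 = 14 and subtracts the 2-entry reserve, reporting 12 free
 * slots.  The low/high marks used for flow control are derived in
 * ipw_queue_init() as n_bd/4 and n_bd/8 (clamped to at least 4 and 2).
 */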
3420 static inline int ipw_queue_space(const struct clx2_queue *q)
3422 int s = q->last_used - q->first_empty;
3423 if (s <= 0)
3424 s += q->n_bd;
3425 s -= 2; /* keep some reserve so we don't confuse empty and full states */
3426 if (s < 0)
3427 s = 0;
3428 return s;
3431 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3433 return (++index == n_bd) ? 0 : index;
3437 * Initialize common DMA queue structure
3439 * @param q queue to init
3440 * @param count Number of BD's to allocate. Should be power of 2
3441 * @param read_register Address for 'read' register
3442 * (not offset within BAR, full address)
3443 * @param write_register Address for 'write' register
3444 * (not offset within BAR, full address)
3445 * @param base_register Address for 'base' register
3446 * (not offset within BAR, full address)
3447 * @param size Address for 'size' register
3448 * (not offset within BAR, full address)
3450 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3451 int count, u32 read, u32 write, u32 base, u32 size)
3453 q->n_bd = count;
3455 q->low_mark = q->n_bd / 4;
3456 if (q->low_mark < 4)
3457 q->low_mark = 4;
3459 q->high_mark = q->n_bd / 8;
3460 if (q->high_mark < 2)
3461 q->high_mark = 2;
3463 q->first_empty = q->last_used = 0;
3464 q->reg_r = read;
3465 q->reg_w = write;
3467 ipw_write32(priv, base, q->dma_addr);
3468 ipw_write32(priv, size, count);
3469 ipw_write32(priv, read, 0);
3470 ipw_write32(priv, write, 0);
3472 _ipw_read32(priv, 0x90);
3475 static int ipw_queue_tx_init(struct ipw_priv *priv,
3476 struct clx2_tx_queue *q,
3477 int count, u32 read, u32 write, u32 base, u32 size)
3479 struct pci_dev *dev = priv->pci_dev;
3481 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3482 if (!q->txb) {
3483 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3484 return -ENOMEM;
3487 q->bd =
3488 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3489 if (!q->bd) {
3490 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3491 sizeof(q->bd[0]) * count);
3492 kfree(q->txb);
3493 q->txb = NULL;
3494 return -ENOMEM;
3497 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3498 return 0;
3502 * Free one TFD, the one at index [txq->q.last_used].
3503 * Do NOT advance any indexes
3505 * @param dev
3506 * @param txq
3508 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3509 struct clx2_tx_queue *txq)
3511 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3512 struct pci_dev *dev = priv->pci_dev;
3513 int i;
3515 /* classify bd */
3516 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3517 /* nothing to cleanup after for host commands */
3518 return;
3520 /* sanity check */
3521 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3522 IPW_ERROR("Too many chunks: %i\n",
3523 le32_to_cpu(bd->u.data.num_chunks));
3524 /** @todo issue a fatal error; this is quite a serious situation */
3525 return;
3528 /* unmap chunks if any */
3529 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3530 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3531 le16_to_cpu(bd->u.data.chunk_len[i]),
3532 PCI_DMA_TODEVICE);
3533 if (txq->txb[txq->q.last_used]) {
3534 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3535 txq->txb[txq->q.last_used] = NULL;
3541 * Deallocate DMA queue.
3543 * Empty queue by removing and destroying all BD's.
3544 * Free all buffers.
3546 * @param dev
3547 * @param q
3549 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3551 struct clx2_queue *q = &txq->q;
3552 struct pci_dev *dev = priv->pci_dev;
3554 if (q->n_bd == 0)
3555 return;
3557 /* first, empty all BD's */
3558 for (; q->first_empty != q->last_used;
3559 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3560 ipw_queue_tx_free_tfd(priv, txq);
3563 /* free buffers belonging to queue itself */
3564 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3565 q->dma_addr);
3566 kfree(txq->txb);
3568 /* 0 fill whole structure */
3569 memset(txq, 0, sizeof(*txq));
3573 * Destroy all DMA queues and structures
3575 * @param priv
3577 static void ipw_tx_queue_free(struct ipw_priv *priv)
3579 /* Tx CMD queue */
3580 ipw_queue_tx_free(priv, &priv->txq_cmd);
3582 /* Tx queues */
3583 ipw_queue_tx_free(priv, &priv->txq[0]);
3584 ipw_queue_tx_free(priv, &priv->txq[1]);
3585 ipw_queue_tx_free(priv, &priv->txq[2]);
3586 ipw_queue_tx_free(priv, &priv->txq[3]);
3589 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3591 /* First 3 bytes are manufacturer */
3592 bssid[0] = priv->mac_addr[0];
3593 bssid[1] = priv->mac_addr[1];
3594 bssid[2] = priv->mac_addr[2];
3596 /* Last bytes are random */
3597 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3599 bssid[0] &= 0xfe; /* clear multicast bit */
3600 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
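/* Example (illustrative, assuming an Intel OUI such as 00:0e:35): the
 * generated IBSS BSSID takes the form 02:0e:35:xx:xx:xx -- the OUI is kept,
 * the multicast bit is cleared and the locally administered bit is set. */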
3603 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3605 struct ipw_station_entry entry;
3606 int i;
3608 for (i = 0; i < priv->num_stations; i++) {
3609 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3610 /* Another node is active in network */
3611 priv->missed_adhoc_beacons = 0;
3612 if (!(priv->config & CFG_STATIC_CHANNEL))
3613 /* when other nodes drop out, we drop out */
3614 priv->config &= ~CFG_ADHOC_PERSIST;
3616 return i;
3620 if (i == MAX_STATIONS)
3621 return IPW_INVALID_STATION;
3623 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3625 entry.reserved = 0;
3626 entry.support_mode = 0;
3627 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3628 memcpy(priv->stations[i], bssid, ETH_ALEN);
3629 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3630 &entry, sizeof(entry));
3631 priv->num_stations++;
3633 return i;
3636 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3638 int i;
3640 for (i = 0; i < priv->num_stations; i++)
3641 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3642 return i;
3644 return IPW_INVALID_STATION;
3647 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3649 int err;
3651 if (priv->status & STATUS_ASSOCIATING) {
3652 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3653 queue_work(priv->workqueue, &priv->disassociate);
3654 return;
3657 if (!(priv->status & STATUS_ASSOCIATED)) {
3658 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3659 return;
3662 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3663 "on channel %d.\n",
3664 MAC_ARG(priv->assoc_request.bssid),
3665 priv->assoc_request.channel);
3667 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3668 priv->status |= STATUS_DISASSOCIATING;
3670 if (quiet)
3671 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3672 else
3673 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3675 err = ipw_send_associate(priv, &priv->assoc_request);
3676 if (err) {
3677 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3678 "failed.\n");
3679 return;
3684 static int ipw_disassociate(void *data)
3686 struct ipw_priv *priv = data;
3687 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3688 return 0;
3689 ipw_send_disassociate(data, 0);
3690 return 1;
3693 static void ipw_bg_disassociate(void *data)
3695 struct ipw_priv *priv = data;
3696 mutex_lock(&priv->mutex);
3697 ipw_disassociate(data);
3698 mutex_unlock(&priv->mutex);
3701 static void ipw_system_config(void *data)
3703 struct ipw_priv *priv = data;
3704 ipw_send_system_config(priv, &priv->sys_config);
3707 struct ipw_status_code {
3708 u16 status;
3709 const char *reason;
3712 static const struct ipw_status_code ipw_status_codes[] = {
3713 {0x00, "Successful"},
3714 {0x01, "Unspecified failure"},
3715 {0x0A, "Cannot support all requested capabilities in the "
3716 "Capability information field"},
3717 {0x0B, "Reassociation denied due to inability to confirm that "
3718 "association exists"},
3719 {0x0C, "Association denied due to reason outside the scope of this "
3720 "standard"},
3721 {0x0D,
3722 "Responding station does not support the specified authentication "
3723 "algorithm"},
3724 {0x0E,
3725 "Received an Authentication frame with authentication sequence "
3726 "transaction sequence number out of expected sequence"},
3727 {0x0F, "Authentication rejected because of challenge failure"},
3728 {0x10, "Authentication rejected due to timeout waiting for next "
3729 "frame in sequence"},
3730 {0x11, "Association denied because AP is unable to handle additional "
3731 "associated stations"},
3732 {0x12,
3733 "Association denied due to requesting station not supporting all "
3734 "of the datarates in the BSSBasicServiceSet Parameter"},
3735 {0x13,
3736 "Association denied due to requesting station not supporting "
3737 "short preamble operation"},
3738 {0x14,
3739 "Association denied due to requesting station not supporting "
3740 "PBCC encoding"},
3741 {0x15,
3742 "Association denied due to requesting station not supporting "
3743 "channel agility"},
3744 {0x19,
3745 "Association denied due to requesting station not supporting "
3746 "short slot operation"},
3747 {0x1A,
3748 "Association denied due to requesting station not supporting "
3749 "DSSS-OFDM operation"},
3750 {0x28, "Invalid Information Element"},
3751 {0x29, "Group Cipher is not valid"},
3752 {0x2A, "Pairwise Cipher is not valid"},
3753 {0x2B, "AKMP is not valid"},
3754 {0x2C, "Unsupported RSN IE version"},
3755 {0x2D, "Invalid RSN IE Capabilities"},
3756 {0x2E, "Cipher suite is rejected per security policy"},
3759 #ifdef CONFIG_IPW2200_DEBUG
3760 static const char *ipw_get_status_code(u16 status)
3762 int i;
3763 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3764 if (ipw_status_codes[i].status == (status & 0xff))
3765 return ipw_status_codes[i].reason;
3766 return "Unknown status value.";
3768 #endif
3770 static inline void average_init(struct average *avg)
3772 memset(avg, 0, sizeof(*avg));
3775 #define DEPTH_RSSI 8
3776 #define DEPTH_NOISE 16
3777 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3779 return ((depth-1)*prev_avg + val)/depth;
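/*
 * Worked example (illustrative): with DEPTH_RSSI = 8, a previous average of
 * -60 and a new sample of -52, exponential_average() returns
 * (7 * -60 + -52) / 8 = -472 / 8 = -59, so each sample moves the average by
 * roughly 1/depth of the difference between the sample and the old average.
 */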
3782 static void average_add(struct average *avg, s16 val)
3784 avg->sum -= avg->entries[avg->pos];
3785 avg->sum += val;
3786 avg->entries[avg->pos++] = val;
3787 if (unlikely(avg->pos == AVG_ENTRIES)) {
3788 avg->init = 1;
3789 avg->pos = 0;
3793 static s16 average_value(struct average *avg)
3795 if (!unlikely(avg->init)) {
3796 if (avg->pos)
3797 return avg->sum / avg->pos;
3798 return 0;
3801 return avg->sum / AVG_ENTRIES;
3804 static void ipw_reset_stats(struct ipw_priv *priv)
3806 u32 len = sizeof(u32);
3808 priv->quality = 0;
3810 average_init(&priv->average_missed_beacons);
3811 priv->exp_avg_rssi = -60;
3812 priv->exp_avg_noise = -85 + 0x100;
3814 priv->last_rate = 0;
3815 priv->last_missed_beacons = 0;
3816 priv->last_rx_packets = 0;
3817 priv->last_tx_packets = 0;
3818 priv->last_tx_failures = 0;
3820 /* Firmware managed, reset only when NIC is restarted, so we have to
3821 * normalize on the current value */
3822 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3823 &priv->last_rx_err, &len);
3824 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3825 &priv->last_tx_failures, &len);
3827 /* Driver managed, reset with each association */
3828 priv->missed_adhoc_beacons = 0;
3829 priv->missed_beacons = 0;
3830 priv->tx_packets = 0;
3831 priv->rx_packets = 0;
3835 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3837 u32 i = 0x80000000;
3838 u32 mask = priv->rates_mask;
3839 /* If currently associated in B mode, restrict the maximum
3840 * rate match to B rates */
3841 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3842 mask &= IEEE80211_CCK_RATES_MASK;
3844 /* TODO: Verify that the rate is supported by the current rates
3845 * list. */
3847 while (i && !(mask & i))
3848 i >>= 1;
3849 switch (i) {
3850 case IEEE80211_CCK_RATE_1MB_MASK:
3851 return 1000000;
3852 case IEEE80211_CCK_RATE_2MB_MASK:
3853 return 2000000;
3854 case IEEE80211_CCK_RATE_5MB_MASK:
3855 return 5500000;
3856 case IEEE80211_OFDM_RATE_6MB_MASK:
3857 return 6000000;
3858 case IEEE80211_OFDM_RATE_9MB_MASK:
3859 return 9000000;
3860 case IEEE80211_CCK_RATE_11MB_MASK:
3861 return 11000000;
3862 case IEEE80211_OFDM_RATE_12MB_MASK:
3863 return 12000000;
3864 case IEEE80211_OFDM_RATE_18MB_MASK:
3865 return 18000000;
3866 case IEEE80211_OFDM_RATE_24MB_MASK:
3867 return 24000000;
3868 case IEEE80211_OFDM_RATE_36MB_MASK:
3869 return 36000000;
3870 case IEEE80211_OFDM_RATE_48MB_MASK:
3871 return 48000000;
3872 case IEEE80211_OFDM_RATE_54MB_MASK:
3873 return 54000000;
3876 if (priv->ieee->mode == IEEE_B)
3877 return 11000000;
3878 else
3879 return 54000000;
3882 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3884 u32 rate, len = sizeof(rate);
3885 int err;
3887 if (!(priv->status & STATUS_ASSOCIATED))
3888 return 0;
3890 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3891 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3892 &len);
3893 if (err) {
3894 IPW_DEBUG_INFO("failed querying ordinals.\n");
3895 return 0;
3897 } else
3898 return ipw_get_max_rate(priv);
3900 switch (rate) {
3901 case IPW_TX_RATE_1MB:
3902 return 1000000;
3903 case IPW_TX_RATE_2MB:
3904 return 2000000;
3905 case IPW_TX_RATE_5MB:
3906 return 5500000;
3907 case IPW_TX_RATE_6MB:
3908 return 6000000;
3909 case IPW_TX_RATE_9MB:
3910 return 9000000;
3911 case IPW_TX_RATE_11MB:
3912 return 11000000;
3913 case IPW_TX_RATE_12MB:
3914 return 12000000;
3915 case IPW_TX_RATE_18MB:
3916 return 18000000;
3917 case IPW_TX_RATE_24MB:
3918 return 24000000;
3919 case IPW_TX_RATE_36MB:
3920 return 36000000;
3921 case IPW_TX_RATE_48MB:
3922 return 48000000;
3923 case IPW_TX_RATE_54MB:
3924 return 54000000;
3927 return 0;
3930 #define IPW_STATS_INTERVAL (2 * HZ)
3931 static void ipw_gather_stats(struct ipw_priv *priv)
3933 u32 rx_err, rx_err_delta, rx_packets_delta;
3934 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3935 u32 missed_beacons_percent, missed_beacons_delta;
3936 u32 quality = 0;
3937 u32 len = sizeof(u32);
3938 s16 rssi;
3939 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3940 rate_quality;
3941 u32 max_rate;
3943 if (!(priv->status & STATUS_ASSOCIATED)) {
3944 priv->quality = 0;
3945 return;
3948 /* Update the statistics */
3949 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3950 &priv->missed_beacons, &len);
3951 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3952 priv->last_missed_beacons = priv->missed_beacons;
3953 if (priv->assoc_request.beacon_interval) {
3954 missed_beacons_percent = missed_beacons_delta *
3955 (HZ * priv->assoc_request.beacon_interval) /
3956 (IPW_STATS_INTERVAL * 10);
3957 } else {
3958 missed_beacons_percent = 0;
3960 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3962 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3963 rx_err_delta = rx_err - priv->last_rx_err;
3964 priv->last_rx_err = rx_err;
3966 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3967 tx_failures_delta = tx_failures - priv->last_tx_failures;
3968 priv->last_tx_failures = tx_failures;
3970 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3971 priv->last_rx_packets = priv->rx_packets;
3973 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3974 priv->last_tx_packets = priv->tx_packets;
3976 /* Calculate quality based on the following:
3978 * Missed beacon: 100% = 0, 0% = 70% missed
3979 * Rate: 60% = 1Mbs, 100% = Max
3980 * Rx and Tx errors represent a straight % of total Rx/Tx
3981 * RSSI: 100% = > -50, 0% = < -80
3982 * Rx errors: 100% = 0, 0% = 50% missed
3984 * The lowest computed quality is used.
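/*
 * Worked example (illustrative): with 2% missed beacons the beacon quality
 * below becomes (98 - 5) * 100 / 95 = 97%; a current rate of 24Mbs against
 * a 54Mbs maximum gives a rate quality of 24000000 * 40 / 54000000 + 60 =
 * 77%; the reported link quality is the minimum of the five metrics.
 */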
3987 #define BEACON_THRESHOLD 5
3988 beacon_quality = 100 - missed_beacons_percent;
3989 if (beacon_quality < BEACON_THRESHOLD)
3990 beacon_quality = 0;
3991 else
3992 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3993 (100 - BEACON_THRESHOLD);
3994 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3995 beacon_quality, missed_beacons_percent);
3997 priv->last_rate = ipw_get_current_rate(priv);
3998 max_rate = ipw_get_max_rate(priv);
3999 rate_quality = priv->last_rate * 40 / max_rate + 60;
4000 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4001 rate_quality, priv->last_rate / 1000000);
4003 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4004 rx_quality = 100 - (rx_err_delta * 100) /
4005 (rx_packets_delta + rx_err_delta);
4006 else
4007 rx_quality = 100;
4008 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4009 rx_quality, rx_err_delta, rx_packets_delta);
4011 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4012 tx_quality = 100 - (tx_failures_delta * 100) /
4013 (tx_packets_delta + tx_failures_delta);
4014 else
4015 tx_quality = 100;
4016 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4017 tx_quality, tx_failures_delta, tx_packets_delta);
4019 rssi = priv->exp_avg_rssi;
4020 signal_quality =
4021 (100 *
4022 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4023 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4024 (priv->ieee->perfect_rssi - rssi) *
4025 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4026 62 * (priv->ieee->perfect_rssi - rssi))) /
4027 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4028 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4029 if (signal_quality > 100)
4030 signal_quality = 100;
4031 else if (signal_quality < 1)
4032 signal_quality = 0;
4034 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4035 signal_quality, rssi);
4037 quality = min(beacon_quality,
4038 min(rate_quality,
4039 min(tx_quality, min(rx_quality, signal_quality))));
4040 if (quality == beacon_quality)
4041 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4042 quality);
4043 if (quality == rate_quality)
4044 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4045 quality);
4046 if (quality == tx_quality)
4047 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4048 quality);
4049 if (quality == rx_quality)
4050 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4051 quality);
4052 if (quality == signal_quality)
4053 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4054 quality);
4056 priv->quality = quality;
4058 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4059 IPW_STATS_INTERVAL);
4062 static void ipw_bg_gather_stats(void *data)
4064 struct ipw_priv *priv = data;
4065 mutex_lock(&priv->mutex);
4066 ipw_gather_stats(data);
4067 mutex_unlock(&priv->mutex);
4070 /* Missed beacon behavior:
4071 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4072 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4073 * Above disassociate threshold, give up and stop scanning.
4074 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
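/* Example (illustrative thresholds, not necessarily the configured
 * defaults): with roaming_threshold = 8 and disassociate_threshold = 24,
 * misses 1-8 are only logged, misses 9-24 kick off the scan/roam path,
 * and the 25th consecutive miss disassociates and aborts any active scan. */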
4075 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4076 int missed_count)
4078 priv->notif_missed_beacons = missed_count;
4080 if (missed_count > priv->disassociate_threshold &&
4081 priv->status & STATUS_ASSOCIATED) {
4082 /* If associated and we've hit the missed
4083 * beacon threshold, disassociate, turn
4084 * off roaming, and abort any active scans */
4085 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4086 IPW_DL_STATE | IPW_DL_ASSOC,
4087 "Missed beacon: %d - disassociate\n", missed_count);
4088 priv->status &= ~STATUS_ROAMING;
4089 if (priv->status & STATUS_SCANNING) {
4090 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4091 IPW_DL_STATE,
4092 "Aborting scan with missed beacon.\n");
4093 queue_work(priv->workqueue, &priv->abort_scan);
4096 queue_work(priv->workqueue, &priv->disassociate);
4097 return;
4100 if (priv->status & STATUS_ROAMING) {
4101 /* If we are currently roaming, then just
4102 * print a debug statement... */
4103 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4104 "Missed beacon: %d - roam in progress\n",
4105 missed_count);
4106 return;
4109 if (roaming &&
4110 (missed_count > priv->roaming_threshold &&
4111 missed_count <= priv->disassociate_threshold)) {
4112 /* If we are not already roaming, set the ROAM
4113 * bit in the status and kick off a scan.
4114 * This can happen several times before we reach
4115 * disassociate_threshold. */
4116 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4117 "Missed beacon: %d - initiate "
4118 "roaming\n", missed_count);
4119 if (!(priv->status & STATUS_ROAMING)) {
4120 priv->status |= STATUS_ROAMING;
4121 if (!(priv->status & STATUS_SCANNING))
4122 queue_work(priv->workqueue,
4123 &priv->request_scan);
4125 return;
4128 if (priv->status & STATUS_SCANNING) {
4129 /* Stop scan to keep fw from getting
4130 * stuck (only if we aren't roaming --
4131 * otherwise we'll never scan more than 2 or 3
4132 * channels..) */
4133 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4134 "Aborting scan with missed beacon.\n");
4135 queue_work(priv->workqueue, &priv->abort_scan);
4138 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4142 * Handle host notification packet.
4143 * Called from interrupt routine
4145 static void ipw_rx_notification(struct ipw_priv *priv,
4146 struct ipw_rx_notification *notif)
4148 notif->size = le16_to_cpu(notif->size);
4150 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4152 switch (notif->subtype) {
4153 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4154 struct notif_association *assoc = &notif->u.assoc;
4156 switch (assoc->state) {
4157 case CMAS_ASSOCIATED:{
4158 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4159 IPW_DL_ASSOC,
4160 "associated: '%s' " MAC_FMT
4161 " \n",
4162 escape_essid(priv->essid,
4163 priv->essid_len),
4164 MAC_ARG(priv->bssid));
4166 switch (priv->ieee->iw_mode) {
4167 case IW_MODE_INFRA:
4168 memcpy(priv->ieee->bssid,
4169 priv->bssid, ETH_ALEN);
4170 break;
4172 case IW_MODE_ADHOC:
4173 memcpy(priv->ieee->bssid,
4174 priv->bssid, ETH_ALEN);
4176 /* clear out the station table */
4177 priv->num_stations = 0;
4179 IPW_DEBUG_ASSOC
4180 ("queueing adhoc check\n");
4181 queue_delayed_work(priv->
4182 workqueue,
4183 &priv->
4184 adhoc_check,
4185 priv->
4186 assoc_request.
4187 beacon_interval);
4188 break;
4191 priv->status &= ~STATUS_ASSOCIATING;
4192 priv->status |= STATUS_ASSOCIATED;
4193 queue_work(priv->workqueue,
4194 &priv->system_config);
4196 #ifdef CONFIG_IPW_QOS
4197 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4198 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4199 if ((priv->status & STATUS_AUTH) &&
4200 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4201 == IEEE80211_STYPE_ASSOC_RESP)) {
4202 if ((sizeof
4203 (struct
4204 ieee80211_assoc_response)
4205 <= notif->size)
4206 && (notif->size <= 2314)) {
4207 struct
4208 ieee80211_rx_stats
4209 stats = {
4210 .len =
4211 notif->
4212 size - 1,
4215 IPW_DEBUG_QOS
4216 ("QoS Associate "
4217 "size %d\n",
4218 notif->size);
4219 ieee80211_rx_mgt(priv->
4220 ieee,
4221 (struct
4222 ieee80211_hdr_4addr
4224 &notif->u.raw, &stats);
4227 #endif
4229 schedule_work(&priv->link_up);
4231 break;
4234 case CMAS_AUTHENTICATED:{
4235 if (priv->
4236 status & (STATUS_ASSOCIATED |
4237 STATUS_AUTH)) {
4238 #ifdef CONFIG_IPW2200_DEBUG
4239 struct notif_authenticate *auth
4240 = &notif->u.auth;
4241 IPW_DEBUG(IPW_DL_NOTIF |
4242 IPW_DL_STATE |
4243 IPW_DL_ASSOC,
4244 "deauthenticated: '%s' "
4245 MAC_FMT
4246 ": (0x%04X) - %s \n",
4247 escape_essid(priv->
4248 essid,
4249 priv->
4250 essid_len),
4251 MAC_ARG(priv->bssid),
4252 ntohs(auth->status),
4253 ipw_get_status_code
4254 (ntohs
4255 (auth->status)));
4256 #endif
4258 priv->status &=
4259 ~(STATUS_ASSOCIATING |
4260 STATUS_AUTH |
4261 STATUS_ASSOCIATED);
4263 schedule_work(&priv->link_down);
4264 break;
4267 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4268 IPW_DL_ASSOC,
4269 "authenticated: '%s' " MAC_FMT
4270 "\n",
4271 escape_essid(priv->essid,
4272 priv->essid_len),
4273 MAC_ARG(priv->bssid));
4274 break;
4277 case CMAS_INIT:{
4278 if (priv->status & STATUS_AUTH) {
4279 struct
4280 ieee80211_assoc_response
4281 *resp;
4282 resp =
4283 (struct
4284 ieee80211_assoc_response
4285 *)&notif->u.raw;
4286 IPW_DEBUG(IPW_DL_NOTIF |
4287 IPW_DL_STATE |
4288 IPW_DL_ASSOC,
4289 "association failed (0x%04X): %s\n",
4290 ntohs(resp->status),
4291 ipw_get_status_code
4292 (ntohs
4293 (resp->status)));
4296 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4297 IPW_DL_ASSOC,
4298 "disassociated: '%s' " MAC_FMT
4299 " \n",
4300 escape_essid(priv->essid,
4301 priv->essid_len),
4302 MAC_ARG(priv->bssid));
4304 priv->status &=
4305 ~(STATUS_DISASSOCIATING |
4306 STATUS_ASSOCIATING |
4307 STATUS_ASSOCIATED | STATUS_AUTH);
4308 if (priv->assoc_network
4309 && (priv->assoc_network->
4310 capability &
4311 WLAN_CAPABILITY_IBSS))
4312 ipw_remove_current_network
4313 (priv);
4315 schedule_work(&priv->link_down);
4317 break;
4320 case CMAS_RX_ASSOC_RESP:
4321 break;
4323 default:
4324 IPW_ERROR("assoc: unknown (%d)\n",
4325 assoc->state);
4326 break;
4329 break;
4332 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4333 struct notif_authenticate *auth = &notif->u.auth;
4334 switch (auth->state) {
4335 case CMAS_AUTHENTICATED:
4336 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4337 "authenticated: '%s' " MAC_FMT " \n",
4338 escape_essid(priv->essid,
4339 priv->essid_len),
4340 MAC_ARG(priv->bssid));
4341 priv->status |= STATUS_AUTH;
4342 break;
4344 case CMAS_INIT:
4345 if (priv->status & STATUS_AUTH) {
4346 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4347 IPW_DL_ASSOC,
4348 "authentication failed (0x%04X): %s\n",
4349 ntohs(auth->status),
4350 ipw_get_status_code(ntohs
4351 (auth->
4352 status)));
4354 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4355 IPW_DL_ASSOC,
4356 "deauthenticated: '%s' " MAC_FMT "\n",
4357 escape_essid(priv->essid,
4358 priv->essid_len),
4359 MAC_ARG(priv->bssid));
4361 priv->status &= ~(STATUS_ASSOCIATING |
4362 STATUS_AUTH |
4363 STATUS_ASSOCIATED);
4365 schedule_work(&priv->link_down);
4366 break;
4368 case CMAS_TX_AUTH_SEQ_1:
4369 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4370 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4371 break;
4372 case CMAS_RX_AUTH_SEQ_2:
4373 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4374 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4375 break;
4376 case CMAS_AUTH_SEQ_1_PASS:
4377 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4378 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4379 break;
4380 case CMAS_AUTH_SEQ_1_FAIL:
4381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4382 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4383 break;
4384 case CMAS_TX_AUTH_SEQ_3:
4385 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4386 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4387 break;
4388 case CMAS_RX_AUTH_SEQ_4:
4389 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4390 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4391 break;
4392 case CMAS_AUTH_SEQ_2_PASS:
4393 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4394 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4395 break;
4396 case CMAS_AUTH_SEQ_2_FAIL:
4397 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4398 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4399 break;
4400 case CMAS_TX_ASSOC:
4401 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4402 IPW_DL_ASSOC, "TX_ASSOC\n");
4403 break;
4404 case CMAS_RX_ASSOC_RESP:
4405 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4406 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4408 break;
4409 case CMAS_ASSOCIATED:
4410 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4411 IPW_DL_ASSOC, "ASSOCIATED\n");
4412 break;
4413 default:
4414 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4415 auth->state);
4416 break;
4418 break;
4421 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4422 struct notif_channel_result *x =
4423 &notif->u.channel_result;
4425 if (notif->size == sizeof(*x)) {
4426 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4427 x->channel_num);
4428 } else {
4429 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4430 "(should be %zd)\n",
4431 notif->size, sizeof(*x));
4433 break;
4436 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4437 struct notif_scan_complete *x = &notif->u.scan_complete;
4438 if (notif->size == sizeof(*x)) {
4439 IPW_DEBUG_SCAN
4440 ("Scan completed: type %d, %d channels, "
4441 "%d status\n", x->scan_type,
4442 x->num_channels, x->status);
4443 } else {
4444 IPW_ERROR("Scan completed of wrong size %d "
4445 "(should be %zd)\n",
4446 notif->size, sizeof(*x));
4449 priv->status &=
4450 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4452 wake_up_interruptible(&priv->wait_state);
4453 cancel_delayed_work(&priv->scan_check);
4455 if (priv->status & STATUS_EXIT_PENDING)
4456 break;
4458 priv->ieee->scans++;
4460 #ifdef CONFIG_IPW2200_MONITOR
4461 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4462 priv->status |= STATUS_SCAN_FORCED;
4463 queue_work(priv->workqueue,
4464 &priv->request_scan);
4465 break;
4467 priv->status &= ~STATUS_SCAN_FORCED;
4468 #endif /* CONFIG_IPW2200_MONITOR */
4470 if (!(priv->status & (STATUS_ASSOCIATED |
4471 STATUS_ASSOCIATING |
4472 STATUS_ROAMING |
4473 STATUS_DISASSOCIATING)))
4474 queue_work(priv->workqueue, &priv->associate);
4475 else if (priv->status & STATUS_ROAMING) {
4476 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4477 /* If a scan completed and we are in roam mode, then
4478 * the scan that completed was the one requested as a
4479 * result of entering roam... so, schedule the
4480 * roam work */
4481 queue_work(priv->workqueue,
4482 &priv->roam);
4483 else
4484 /* Don't schedule if we aborted the scan */
4485 priv->status &= ~STATUS_ROAMING;
4486 } else if (priv->status & STATUS_SCAN_PENDING)
4487 queue_work(priv->workqueue,
4488 &priv->request_scan);
4489 else if (priv->config & CFG_BACKGROUND_SCAN
4490 && priv->status & STATUS_ASSOCIATED)
4491 queue_delayed_work(priv->workqueue,
4492 &priv->request_scan, HZ);
4494 /* Send an empty event to user space.
4495 * We don't send the received data on the event because
4496 * it would require us to do complex transcoding, and
4497 * we want to minimise the work done in the irq handler
4498 * Use a request to extract the data.
4499 * Also, we generate this even for any scan, regardless
4500 * of how the scan was initiated. User space can just
4501 * sync on periodic scan to get fresh data...
4502 * Jean II */
4503 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4504 union iwreq_data wrqu;
4506 wrqu.data.length = 0;
4507 wrqu.data.flags = 0;
4508 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4509 &wrqu, NULL);
4511 break;
4514 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4515 struct notif_frag_length *x = &notif->u.frag_len;
4517 if (notif->size == sizeof(*x))
4518 IPW_ERROR("Frag length: %d\n",
4519 le16_to_cpu(x->frag_length));
4520 else
4521 IPW_ERROR("Frag length of wrong size %d "
4522 "(should be %zd)\n",
4523 notif->size, sizeof(*x));
4524 break;
4527 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4528 struct notif_link_deterioration *x =
4529 &notif->u.link_deterioration;
4531 if (notif->size == sizeof(*x)) {
4532 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4533 "link deterioration: type %d, cnt %d\n",
4534 x->silence_notification_type,
4535 x->silence_count);
4536 memcpy(&priv->last_link_deterioration, x,
4537 sizeof(*x));
4538 } else {
4539 IPW_ERROR("Link Deterioration of wrong size %d "
4540 "(should be %zd)\n",
4541 notif->size, sizeof(*x));
4543 break;
4546 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4547 IPW_ERROR("Dino config\n");
4548 if (priv->hcmd
4549 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4550 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4552 break;
4555 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4556 struct notif_beacon_state *x = &notif->u.beacon_state;
4557 if (notif->size != sizeof(*x)) {
4558 IPW_ERROR
4559 ("Beacon state of wrong size %d (should "
4560 "be %zd)\n", notif->size, sizeof(*x));
4561 break;
4564 if (le32_to_cpu(x->state) ==
4565 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4566 ipw_handle_missed_beacon(priv,
4567 le32_to_cpu(x->
4568 number));
4570 break;
4573 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4574 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4575 if (notif->size == sizeof(*x)) {
4576 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4577 "0x%02x station %d\n",
4578 x->key_state, x->security_type,
4579 x->station_index);
4580 break;
4583 IPW_ERROR
4584 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4585 notif->size, sizeof(*x));
4586 break;
4589 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4590 struct notif_calibration *x = &notif->u.calibration;
4592 if (notif->size == sizeof(*x)) {
4593 memcpy(&priv->calib, x, sizeof(*x));
4594 IPW_DEBUG_INFO("TODO: Calibration\n");
4595 break;
4598 IPW_ERROR
4599 ("Calibration of wrong size %d (should be %zd)\n",
4600 notif->size, sizeof(*x));
4601 break;
4604 case HOST_NOTIFICATION_NOISE_STATS:{
4605 if (notif->size == sizeof(u32)) {
4606 priv->exp_avg_noise =
4607 exponential_average(priv->exp_avg_noise,
4608 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4609 DEPTH_NOISE);
4610 break;
4613 IPW_ERROR
4614 ("Noise stat is wrong size %d (should be %zd)\n",
4615 notif->size, sizeof(u32));
4616 break;
4619 default:
4620 IPW_DEBUG_NOTIF("Unknown notification: "
4621 "subtype=%d,flags=0x%2x,size=%d\n",
4622 notif->subtype, notif->flags, notif->size);
4627 * Destroys all DMA structures and initialises them again
4629 * @param priv
4630 * @return error code
4632 static int ipw_queue_reset(struct ipw_priv *priv)
4634 int rc = 0;
4635 /** @todo customize queue sizes */
4636 int nTx = 64, nTxCmd = 8;
4637 ipw_tx_queue_free(priv);
4638 /* Tx CMD queue */
4639 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4640 IPW_TX_CMD_QUEUE_READ_INDEX,
4641 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4642 IPW_TX_CMD_QUEUE_BD_BASE,
4643 IPW_TX_CMD_QUEUE_BD_SIZE);
4644 if (rc) {
4645 IPW_ERROR("Tx Cmd queue init failed\n");
4646 goto error;
4648 /* Tx queue(s) */
4649 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4650 IPW_TX_QUEUE_0_READ_INDEX,
4651 IPW_TX_QUEUE_0_WRITE_INDEX,
4652 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4653 if (rc) {
4654 IPW_ERROR("Tx 0 queue init failed\n");
4655 goto error;
4657 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4658 IPW_TX_QUEUE_1_READ_INDEX,
4659 IPW_TX_QUEUE_1_WRITE_INDEX,
4660 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4661 if (rc) {
4662 IPW_ERROR("Tx 1 queue init failed\n");
4663 goto error;
4665 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4666 IPW_TX_QUEUE_2_READ_INDEX,
4667 IPW_TX_QUEUE_2_WRITE_INDEX,
4668 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4669 if (rc) {
4670 IPW_ERROR("Tx 2 queue init failed\n");
4671 goto error;
4673 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4674 IPW_TX_QUEUE_3_READ_INDEX,
4675 IPW_TX_QUEUE_3_WRITE_INDEX,
4676 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4677 if (rc) {
4678 IPW_ERROR("Tx 3 queue init failed\n");
4679 goto error;
4681 /* statistics */
4682 priv->rx_bufs_min = 0;
4683 priv->rx_pend_max = 0;
4684 return rc;
4686 error:
4687 ipw_tx_queue_free(priv);
4688 return rc;
4692 * Reclaim Tx queue entries no longer used by the NIC.
4694 * When FW advances 'R' index, all entries between old and
4695 * new 'R' index need to be reclaimed. As a result, some free space
4696 * forms. If there is enough free space (> low mark), wake Tx queue.
4698 * @note Need to protect against garbage in 'R' index
4699 * @param priv
4700 * @param txq
4701 * @param qindex
4702 * @return Number of used entries remaining in the queue
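 *
 * Example (illustrative values): with n_bd = 64, last_used = 56 and a
 * hardware read index of 60, the loop below reclaims entries 56..59.
 * If first_empty has already wrapped around to 1, the used count is:
 *
 *	used = q->first_empty - q->last_used;     (1 - 60 = -59)
 *	if (used < 0)
 *		used += q->n_bd;                  (-59 + 64 = 5)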
4704 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4705 struct clx2_tx_queue *txq, int qindex)
4707 u32 hw_tail;
4708 int used;
4709 struct clx2_queue *q = &txq->q;
4711 hw_tail = ipw_read32(priv, q->reg_r);
4712 if (hw_tail >= q->n_bd) {
4713 IPW_ERROR
4714 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4715 hw_tail, q->n_bd);
4716 goto done;
4718 for (; q->last_used != hw_tail;
4719 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4720 ipw_queue_tx_free_tfd(priv, txq);
4721 priv->tx_packets++;
4723 done:
4724 if ((ipw_queue_space(q) > q->low_mark) &&
4725 (qindex >= 0) &&
4726 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4727 netif_wake_queue(priv->net_dev);
4728 used = q->first_empty - q->last_used;
4729 if (used < 0)
4730 used += q->n_bd;
4732 return used;
4735 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4736 int len, int sync)
4738 struct clx2_tx_queue *txq = &priv->txq_cmd;
4739 struct clx2_queue *q = &txq->q;
4740 struct tfd_frame *tfd;
4742 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4743 IPW_ERROR("No space for Tx\n");
4744 return -EBUSY;
4747 tfd = &txq->bd[q->first_empty];
4748 txq->txb[q->first_empty] = NULL;
4750 memset(tfd, 0, sizeof(*tfd));
4751 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4752 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4753 priv->hcmd_seq++;
4754 tfd->u.cmd.index = hcmd;
4755 tfd->u.cmd.length = len;
4756 memcpy(tfd->u.cmd.payload, buf, len);
4757 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4758 ipw_write32(priv, q->reg_w, q->first_empty);
4759 _ipw_read32(priv, 0x90);
4761 return 0;
4765 * Rx theory of operation
4767 * The host allocates 32 DMA target addresses and passes the host address
4768 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4769 * 0 to 31
4771 * Rx Queue Indexes
4772 * The host/firmware share two index registers for managing the Rx buffers.
4774 * The READ index maps to the first position that the firmware may be writing
4775 * to -- the driver can read up to (but not including) this position and get
4776 * good data.
4777 * The READ index is managed by the firmware once the card is enabled.
4779 * The WRITE index maps to the last position the driver has read from -- the
4780 * position preceding WRITE is the last slot into which the firmware can place a packet.
4782 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4783 * WRITE = READ.
4785 * During initialization the host sets up the READ queue position to the first
4786 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4788 * When the firmware places a packet in a buffer it will advance the READ index
4789 * and fire the RX interrupt. The driver can then query the READ index and
4790 * process as many packets as possible, moving the WRITE index forward as it
4791 * resets the Rx queue buffers with new memory.
4793 * The management in the driver is as follows:
4794 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4795 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4796 * to replenish the ipw->rxq->rx_free.
4797 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4798 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4799 * 'processed' and 'read' driver indexes as well)
4800 * + A received packet is processed and handed to the kernel network stack,
4801 * detached from the ipw->rxq. The driver 'processed' index is updated.
4802 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4803 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4804 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4805 * were enough free buffers and RX_STALLED is set it is cleared.
4808 * Driver sequence:
4810 * ipw_rx_queue_alloc() Allocates rx_free
4811 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4812 * ipw_rx_queue_restock
4813 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4814 * queue, updates firmware pointers, and updates
4815 * the WRITE index. If insufficient rx_free buffers
4816 * are available, schedules ipw_rx_queue_replenish
4818 * -- enable interrupts --
4819 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4820 * READ INDEX, detaching the SKB from the pool.
4821 * Moves the packet buffer from queue to rx_used.
4822 * Calls ipw_rx_queue_restock to refill any empty
4823 * slots.
4824 * ...
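 *
 * Example (using the 32-slot queue described above): if READ = 5, then
 * WRITE = 4 means there is no data for the driver to process, while
 * WRITE = 5 means every slot is in use.  Both indexes wrap modulo
 * RX_QUEUE_SIZE, e.g. the restock path below advances the WRITE index
 * with (rxq->write + 1) % RX_QUEUE_SIZE.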
4829 * If there are slots in the RX queue that need to be restocked,
4830 * and we have free pre-allocated buffers, fill the ranks as much
4831 * as we can pulling from rx_free.
4833 * This moves the 'write' index forward to catch up with 'processed', and
4834 * also updates the memory address in the firmware to reference the new
4835 * target buffer.
4837 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4839 struct ipw_rx_queue *rxq = priv->rxq;
4840 struct list_head *element;
4841 struct ipw_rx_mem_buffer *rxb;
4842 unsigned long flags;
4843 int write;
4845 spin_lock_irqsave(&rxq->lock, flags);
4846 write = rxq->write;
4847 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4848 element = rxq->rx_free.next;
4849 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4850 list_del(element);
4852 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4853 rxb->dma_addr);
4854 rxq->queue[rxq->write] = rxb;
4855 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4856 rxq->free_count--;
4858 spin_unlock_irqrestore(&rxq->lock, flags);
4860 /* If the pre-allocated buffer pool is dropping low, schedule to
4861 * refill it */
4862 if (rxq->free_count <= RX_LOW_WATERMARK)
4863 queue_work(priv->workqueue, &priv->rx_replenish);
4865 /* If we've added more space for the firmware to place data, tell it */
4866 if (write != rxq->write)
4867 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4871 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
4872 * Also restock the Rx queue via ipw_rx_queue_restock.
4874 * This is called as a scheduled work item (except during initialization)
4876 static void ipw_rx_queue_replenish(void *data)
4878 struct ipw_priv *priv = data;
4879 struct ipw_rx_queue *rxq = priv->rxq;
4880 struct list_head *element;
4881 struct ipw_rx_mem_buffer *rxb;
4882 unsigned long flags;
4884 spin_lock_irqsave(&rxq->lock, flags);
4885 while (!list_empty(&rxq->rx_used)) {
4886 element = rxq->rx_used.next;
4887 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4888 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4889 if (!rxb->skb) {
4890 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4891 priv->net_dev->name);
4892 /* We don't reschedule replenish work here -- we will
4893 * call the restock method and if it still needs
4894 * more buffers it will schedule replenish */
4895 break;
4897 list_del(element);
4899 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4900 rxb->dma_addr =
4901 pci_map_single(priv->pci_dev, rxb->skb->data,
4902 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4904 list_add_tail(&rxb->list, &rxq->rx_free);
4905 rxq->free_count++;
4907 spin_unlock_irqrestore(&rxq->lock, flags);
4909 ipw_rx_queue_restock(priv);
4912 static void ipw_bg_rx_queue_replenish(void *data)
4914 struct ipw_priv *priv = data;
4915 mutex_lock(&priv->mutex);
4916 ipw_rx_queue_replenish(data);
4917 mutex_unlock(&priv->mutex);
4920 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4921 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4922 * This free routine walks the list of POOL entries and if SKB is set to
4923 * non-NULL it is unmapped and freed.
4925 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4927 int i;
4929 if (!rxq)
4930 return;
4932 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4933 if (rxq->pool[i].skb != NULL) {
4934 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4935 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4936 dev_kfree_skb(rxq->pool[i].skb);
4940 kfree(rxq);
4943 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4945 struct ipw_rx_queue *rxq;
4946 int i;
4948 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4949 if (unlikely(!rxq)) {
4950 IPW_ERROR("memory allocation failed\n");
4951 return NULL;
4953 spin_lock_init(&rxq->lock);
4954 INIT_LIST_HEAD(&rxq->rx_free);
4955 INIT_LIST_HEAD(&rxq->rx_used);
4957 /* Fill the rx_used queue with _all_ of the Rx buffers */
4958 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4959 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4961 /* Set us so that we have processed and used all buffers, but have
4962 * not restocked the Rx queue with fresh buffers */
4963 rxq->read = rxq->write = 0;
4964 rxq->processed = RX_QUEUE_SIZE - 1;
4965 rxq->free_count = 0;
4967 return rxq;
4970 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4972 rate &= ~IEEE80211_BASIC_RATE_MASK;
4973 if (ieee_mode == IEEE_A) {
4974 switch (rate) {
4975 case IEEE80211_OFDM_RATE_6MB:
4976 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4977 1 : 0;
4978 case IEEE80211_OFDM_RATE_9MB:
4979 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4980 1 : 0;
4981 case IEEE80211_OFDM_RATE_12MB:
4982 return priv->
4983 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4984 case IEEE80211_OFDM_RATE_18MB:
4985 return priv->
4986 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4987 case IEEE80211_OFDM_RATE_24MB:
4988 return priv->
4989 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4990 case IEEE80211_OFDM_RATE_36MB:
4991 return priv->
4992 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4993 case IEEE80211_OFDM_RATE_48MB:
4994 return priv->
4995 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4996 case IEEE80211_OFDM_RATE_54MB:
4997 return priv->
4998 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4999 default:
5000 return 0;
5004 /* B and G mixed */
5005 switch (rate) {
5006 case IEEE80211_CCK_RATE_1MB:
5007 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5008 case IEEE80211_CCK_RATE_2MB:
5009 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5010 case IEEE80211_CCK_RATE_5MB:
5011 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5012 case IEEE80211_CCK_RATE_11MB:
5013 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5016 /* If we are limited to B modulations, bail at this point */
5017 if (ieee_mode == IEEE_B)
5018 return 0;
5020 /* G */
5021 switch (rate) {
5022 case IEEE80211_OFDM_RATE_6MB:
5023 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5024 case IEEE80211_OFDM_RATE_9MB:
5025 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5026 case IEEE80211_OFDM_RATE_12MB:
5027 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5028 case IEEE80211_OFDM_RATE_18MB:
5029 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5030 case IEEE80211_OFDM_RATE_24MB:
5031 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5032 case IEEE80211_OFDM_RATE_36MB:
5033 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5034 case IEEE80211_OFDM_RATE_48MB:
5035 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5036 case IEEE80211_OFDM_RATE_54MB:
5037 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5040 return 0;
5043 static int ipw_compatible_rates(struct ipw_priv *priv,
5044 const struct ieee80211_network *network,
5045 struct ipw_supported_rates *rates)
5047 int num_rates, i;
5049 memset(rates, 0, sizeof(*rates));
5050 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5051 rates->num_rates = 0;
5052 for (i = 0; i < num_rates; i++) {
5053 if (!ipw_is_rate_in_mask(priv, network->mode,
5054 network->rates[i])) {
5056 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5057 IPW_DEBUG_SCAN("Adding masked mandatory "
5058 "rate %02X\n",
5059 network->rates[i]);
5060 rates->supported_rates[rates->num_rates++] =
5061 network->rates[i];
5062 continue;
5065 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5066 network->rates[i], priv->rates_mask);
5067 continue;
5070 rates->supported_rates[rates->num_rates++] = network->rates[i];
5073 num_rates = min(network->rates_ex_len,
5074 (u8) (IPW_MAX_RATES - num_rates));
5075 for (i = 0; i < num_rates; i++) {
5076 if (!ipw_is_rate_in_mask(priv, network->mode,
5077 network->rates_ex[i])) {
5078 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5079 IPW_DEBUG_SCAN("Adding masked mandatory "
5080 "rate %02X\n",
5081 network->rates_ex[i]);
5082 rates->supported_rates[rates->num_rates++] =
5083 network->rates_ex[i];
5084 continue;
5087 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5088 network->rates_ex[i], priv->rates_mask);
5089 continue;
5092 rates->supported_rates[rates->num_rates++] =
5093 network->rates_ex[i];
5096 return 1;
5099 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5100 const struct ipw_supported_rates *src)
5102 u8 i;
5103 for (i = 0; i < src->num_rates; i++)
5104 dest->supported_rates[i] = src->supported_rates[i];
5105 dest->num_rates = src->num_rates;
5108 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5109 * mask should ever be used -- right now all callers to add the scan rates are
5110 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5111 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5112 u8 modulation, u32 rate_mask)
5114 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5115 IEEE80211_BASIC_RATE_MASK : 0;
5117 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5118 rates->supported_rates[rates->num_rates++] =
5119 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5121 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5122 rates->supported_rates[rates->num_rates++] =
5123 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5125 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5126 rates->supported_rates[rates->num_rates++] = basic_mask |
5127 IEEE80211_CCK_RATE_5MB;
5129 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5130 rates->supported_rates[rates->num_rates++] = basic_mask |
5131 IEEE80211_CCK_RATE_11MB;
5134 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5135 u8 modulation, u32 rate_mask)
5137 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5138 IEEE80211_BASIC_RATE_MASK : 0;
5140 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5141 rates->supported_rates[rates->num_rates++] = basic_mask |
5142 IEEE80211_OFDM_RATE_6MB;
5144 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5145 rates->supported_rates[rates->num_rates++] =
5146 IEEE80211_OFDM_RATE_9MB;
5148 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5149 rates->supported_rates[rates->num_rates++] = basic_mask |
5150 IEEE80211_OFDM_RATE_12MB;
5152 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5153 rates->supported_rates[rates->num_rates++] =
5154 IEEE80211_OFDM_RATE_18MB;
5156 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5157 rates->supported_rates[rates->num_rates++] = basic_mask |
5158 IEEE80211_OFDM_RATE_24MB;
5160 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5161 rates->supported_rates[rates->num_rates++] =
5162 IEEE80211_OFDM_RATE_36MB;
5164 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5165 rates->supported_rates[rates->num_rates++] =
5166 IEEE80211_OFDM_RATE_48MB;
5168 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5169 rates->supported_rates[rates->num_rates++] =
5170 IEEE80211_OFDM_RATE_54MB;
5173 struct ipw_network_match {
5174 struct ieee80211_network *network;
5175 struct ipw_supported_rates rates;
5178 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5179 struct ipw_network_match *match,
5180 struct ieee80211_network *network,
5181 int roaming)
5183 struct ipw_supported_rates rates;
5185 /* Verify that this network's capability is compatible with the
5186 * current mode (AdHoc or Infrastructure) */
5187 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5188 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5189 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5190 "capability mismatch.\n",
5191 escape_essid(network->ssid, network->ssid_len),
5192 MAC_ARG(network->bssid));
5193 return 0;
5196 /* If we do not have an ESSID for this AP, we can not associate with
5197 * it */
5198 if (network->flags & NETWORK_EMPTY_ESSID) {
5199 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5200 "because of hidden ESSID.\n",
5201 escape_essid(network->ssid, network->ssid_len),
5202 MAC_ARG(network->bssid));
5203 return 0;
5206 if (unlikely(roaming)) {
5207 /* If we are roaming, then check that this is a valid
5208 * network to try to roam to */
5209 if ((network->ssid_len != match->network->ssid_len) ||
5210 memcmp(network->ssid, match->network->ssid,
5211 network->ssid_len)) {
5212 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5213 "because of non-network ESSID.\n",
5214 escape_essid(network->ssid,
5215 network->ssid_len),
5216 MAC_ARG(network->bssid));
5217 return 0;
5219 } else {
5220 /* If an ESSID has been configured then compare the broadcast
5221 * ESSID to ours */
5222 if ((priv->config & CFG_STATIC_ESSID) &&
5223 ((network->ssid_len != priv->essid_len) ||
5224 memcmp(network->ssid, priv->essid,
5225 min(network->ssid_len, priv->essid_len)))) {
5226 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5228 strncpy(escaped,
5229 escape_essid(network->ssid, network->ssid_len),
5230 sizeof(escaped));
5231 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5232 "because of ESSID mismatch: '%s'.\n",
5233 escaped, MAC_ARG(network->bssid),
5234 escape_essid(priv->essid,
5235 priv->essid_len));
5236 return 0;
5240 /* If this network's TSF timestamp is behind the current match's,
5241 * don't bother testing everything else. */
5243 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5244 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5245 "current network.\n",
5246 escape_essid(match->network->ssid,
5247 match->network->ssid_len));
5248 return 0;
5249 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5250 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5251 "current network.\n",
5252 escape_essid(match->network->ssid,
5253 match->network->ssid_len));
5254 return 0;
5257 /* Now go through and see if the requested network is valid... */
5258 if (priv->ieee->scan_age != 0 &&
5259 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5260 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5261 "because of age: %ums.\n",
5262 escape_essid(network->ssid, network->ssid_len),
5263 MAC_ARG(network->bssid),
5264 jiffies_to_msecs(jiffies -
5265 network->last_scanned));
5266 return 0;
5269 if ((priv->config & CFG_STATIC_CHANNEL) &&
5270 (network->channel != priv->channel)) {
5271 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5272 "because of channel mismatch: %d != %d.\n",
5273 escape_essid(network->ssid, network->ssid_len),
5274 MAC_ARG(network->bssid),
5275 network->channel, priv->channel);
5276 return 0;
5279 /* Verify privacy compatibility */
5280 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5281 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5282 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5283 "because of privacy mismatch: %s != %s.\n",
5284 escape_essid(network->ssid, network->ssid_len),
5285 MAC_ARG(network->bssid),
5286 priv->
5287 capability & CAP_PRIVACY_ON ? "on" : "off",
5288 network->
5289 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5290 "off");
5291 return 0;
5294 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5295 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5296 "because of the same BSSID match: " MAC_FMT
5297 ".\n", escape_essid(network->ssid,
5298 network->ssid_len),
5299 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5300 return 0;
5303 /* Filter out any incompatible freq / mode combinations */
5304 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5305 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5306 "because of invalid frequency/mode "
5307 "combination.\n",
5308 escape_essid(network->ssid, network->ssid_len),
5309 MAC_ARG(network->bssid));
5310 return 0;
5313 /* Ensure that the rates supported by the driver are compatible with
5314 * this AP, including verification of basic rates (mandatory) */
5315 if (!ipw_compatible_rates(priv, network, &rates)) {
5316 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5317 "because configured rate mask excludes "
5318 "AP mandatory rate.\n",
5319 escape_essid(network->ssid, network->ssid_len),
5320 MAC_ARG(network->bssid));
5321 return 0;
5324 if (rates.num_rates == 0) {
5325 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5326 "because of no compatible rates.\n",
5327 escape_essid(network->ssid, network->ssid_len),
5328 MAC_ARG(network->bssid));
5329 return 0;
5332 /* TODO: Perform any further minimal comparative tests. We do not
5333 * want to put too much policy logic here; intelligent scan selection
5334 * should occur within a generic IEEE 802.11 user space tool. */
5336 /* Set up 'new' AP to this network */
5337 ipw_copy_rates(&match->rates, &rates);
5338 match->network = network;
5339 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5340 escape_essid(network->ssid, network->ssid_len),
5341 MAC_ARG(network->bssid));
5343 return 1;
5346 static void ipw_merge_adhoc_network(void *data)
5348 struct ipw_priv *priv = data;
5349 struct ieee80211_network *network = NULL;
5350 struct ipw_network_match match = {
5351 .network = priv->assoc_network
5354 if ((priv->status & STATUS_ASSOCIATED) &&
5355 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5356 /* First pass through ROAM process -- look for a better
5357 * network */
5358 unsigned long flags;
5360 spin_lock_irqsave(&priv->ieee->lock, flags);
5361 list_for_each_entry(network, &priv->ieee->network_list, list) {
5362 if (network != priv->assoc_network)
5363 ipw_find_adhoc_network(priv, &match, network,
5366 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5368 if (match.network == priv->assoc_network) {
5369 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5370 "merge to.\n");
5371 return;
5374 mutex_lock(&priv->mutex);
5375 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5376 IPW_DEBUG_MERGE("remove network %s\n",
5377 escape_essid(priv->essid,
5378 priv->essid_len));
5379 ipw_remove_current_network(priv);
5382 ipw_disassociate(priv);
5383 priv->assoc_network = match.network;
5384 mutex_unlock(&priv->mutex);
5385 return;
5389 static int ipw_best_network(struct ipw_priv *priv,
5390 struct ipw_network_match *match,
5391 struct ieee80211_network *network, int roaming)
5393 struct ipw_supported_rates rates;
5395 /* Verify that this network's capability is compatible with the
5396 * current mode (AdHoc or Infrastructure) */
5397 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5398 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5399 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5400 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5401 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5402 "capability mismatch.\n",
5403 escape_essid(network->ssid, network->ssid_len),
5404 MAC_ARG(network->bssid));
5405 return 0;
5408 /* If we do not have an ESSID for this AP, we can not associate with
5409 * it */
5410 if (network->flags & NETWORK_EMPTY_ESSID) {
5411 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5412 "because of hidden ESSID.\n",
5413 escape_essid(network->ssid, network->ssid_len),
5414 MAC_ARG(network->bssid));
5415 return 0;
5418 if (unlikely(roaming)) {
5419 /* If we are roaming, then check that this is a valid
5420 * network to try to roam to */
5421 if ((network->ssid_len != match->network->ssid_len) ||
5422 memcmp(network->ssid, match->network->ssid,
5423 network->ssid_len)) {
5424 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5425 "because of non-network ESSID.\n",
5426 escape_essid(network->ssid,
5427 network->ssid_len),
5428 MAC_ARG(network->bssid));
5429 return 0;
5431 } else {
5432 /* If an ESSID has been configured then compare the broadcast
5433 * ESSID to ours */
5434 if ((priv->config & CFG_STATIC_ESSID) &&
5435 ((network->ssid_len != priv->essid_len) ||
5436 memcmp(network->ssid, priv->essid,
5437 min(network->ssid_len, priv->essid_len)))) {
5438 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5439 strncpy(escaped,
5440 escape_essid(network->ssid, network->ssid_len),
5441 sizeof(escaped));
5442 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5443 "because of ESSID mismatch: '%s'.\n",
5444 escaped, MAC_ARG(network->bssid),
5445 escape_essid(priv->essid,
5446 priv->essid_len));
5447 return 0;
5451 /* If the current match has a stronger signal than this network,
5452 * don't bother testing everything else. */
5453 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5454 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5455 strncpy(escaped,
5456 escape_essid(network->ssid, network->ssid_len),
5457 sizeof(escaped));
5458 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5459 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5460 escaped, MAC_ARG(network->bssid),
5461 escape_essid(match->network->ssid,
5462 match->network->ssid_len),
5463 MAC_ARG(match->network->bssid));
5464 return 0;
5467 /* If this network has already had an association attempt within the
5468 * last 3 seconds, do not try and associate again... */
5469 if (network->last_associate &&
5470 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5471 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5472 "because of storming (%ums since last "
5473 "assoc attempt).\n",
5474 escape_essid(network->ssid, network->ssid_len),
5475 MAC_ARG(network->bssid),
5476 jiffies_to_msecs(jiffies -
5477 network->last_associate));
5478 return 0;
5481 /* Now go through and see if the requested network is valid... */
5482 if (priv->ieee->scan_age != 0 &&
5483 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5484 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5485 "because of age: %ums.\n",
5486 escape_essid(network->ssid, network->ssid_len),
5487 MAC_ARG(network->bssid),
5488 jiffies_to_msecs(jiffies -
5489 network->last_scanned));
5490 return 0;
5493 if ((priv->config & CFG_STATIC_CHANNEL) &&
5494 (network->channel != priv->channel)) {
5495 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5496 "because of channel mismatch: %d != %d.\n",
5497 escape_essid(network->ssid, network->ssid_len),
5498 MAC_ARG(network->bssid),
5499 network->channel, priv->channel);
5500 return 0;
5503 /* Verify privacy compatibility */
5504 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5505 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5506 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5507 "because of privacy mismatch: %s != %s.\n",
5508 escape_essid(network->ssid, network->ssid_len),
5509 MAC_ARG(network->bssid),
5510 priv->capability & CAP_PRIVACY_ON ? "on" :
5511 "off",
5512 network->capability &
5513 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5514 return 0;
5517 if ((priv->config & CFG_STATIC_BSSID) &&
5518 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5519 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5520 "because of BSSID mismatch: " MAC_FMT ".\n",
5521 escape_essid(network->ssid, network->ssid_len),
5522 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5523 return 0;
5526 /* Filter out any incompatible freq / mode combinations */
5527 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5528 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5529 "because of invalid frequency/mode "
5530 "combination.\n",
5531 escape_essid(network->ssid, network->ssid_len),
5532 MAC_ARG(network->bssid));
5533 return 0;
5536 /* Filter out invalid channel in current GEO */
5537 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5538 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5539 "because of invalid channel in current GEO\n",
5540 escape_essid(network->ssid, network->ssid_len),
5541 MAC_ARG(network->bssid));
5542 return 0;
5545 /* Ensure that the rates supported by the driver are compatible with
5546 * this AP, including verification of basic rates (mandatory) */
5547 if (!ipw_compatible_rates(priv, network, &rates)) {
5548 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5549 "because configured rate mask excludes "
5550 "AP mandatory rate.\n",
5551 escape_essid(network->ssid, network->ssid_len),
5552 MAC_ARG(network->bssid));
5553 return 0;
5556 if (rates.num_rates == 0) {
5557 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5558 "because of no compatible rates.\n",
5559 escape_essid(network->ssid, network->ssid_len),
5560 MAC_ARG(network->bssid));
5561 return 0;
5564 /* TODO: Perform any further minimal comparative tests. We do not
5565 * want to put too much policy logic here; intelligent scan selection
5566 * should occur within a generic IEEE 802.11 user space tool. */
5568 /* Set up 'new' AP to this network */
5569 ipw_copy_rates(&match->rates, &rates);
5570 match->network = network;
5572 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5573 escape_essid(network->ssid, network->ssid_len),
5574 MAC_ARG(network->bssid));
5576 return 1;
5579 static void ipw_adhoc_create(struct ipw_priv *priv,
5580 struct ieee80211_network *network)
5582 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5583 int i;
5586 * For the purposes of scanning, we can set our wireless mode
5587 * to trigger scans across combinations of bands, but when it
5588 * comes to creating a new ad-hoc network, we have to tell the FW
5589 * exactly which band to use.
5591 * We also have the possibility of an invalid channel for the
5592 * chosen band. Attempting to create a new ad-hoc network
5593 * with an invalid channel for wireless mode will trigger a
5594 * FW fatal error.
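 *
 * For example (illustrative channels): channel 36 is only valid in the
 * 5.2GHz band, so the switch below picks IEEE_A for it, while channel 6
 * falls in the 2.4GHz band and selects IEEE_G (or IEEE_B when G is not
 * enabled).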
5597 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5598 case IEEE80211_52GHZ_BAND:
5599 network->mode = IEEE_A;
5600 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5601 BUG_ON(i == -1);
5602 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5603 IPW_WARNING("Overriding invalid channel\n");
5604 priv->channel = geo->a[0].channel;
5606 break;
5608 case IEEE80211_24GHZ_BAND:
5609 if (priv->ieee->mode & IEEE_G)
5610 network->mode = IEEE_G;
5611 else
5612 network->mode = IEEE_B;
5613 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5614 BUG_ON(i == -1);
5615 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5616 IPW_WARNING("Overriding invalid channel\n");
5617 priv->channel = geo->bg[0].channel;
5619 break;
5621 default:
5622 IPW_WARNING("Overriding invalid channel\n");
5623 if (priv->ieee->mode & IEEE_A) {
5624 network->mode = IEEE_A;
5625 priv->channel = geo->a[0].channel;
5626 } else if (priv->ieee->mode & IEEE_G) {
5627 network->mode = IEEE_G;
5628 priv->channel = geo->bg[0].channel;
5629 } else {
5630 network->mode = IEEE_B;
5631 priv->channel = geo->bg[0].channel;
5633 break;
5636 network->channel = priv->channel;
5637 priv->config |= CFG_ADHOC_PERSIST;
5638 ipw_create_bssid(priv, network->bssid);
5639 network->ssid_len = priv->essid_len;
5640 memcpy(network->ssid, priv->essid, priv->essid_len);
5641 memset(&network->stats, 0, sizeof(network->stats));
5642 network->capability = WLAN_CAPABILITY_IBSS;
5643 if (!(priv->config & CFG_PREAMBLE_LONG))
5644 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5645 if (priv->capability & CAP_PRIVACY_ON)
5646 network->capability |= WLAN_CAPABILITY_PRIVACY;
5647 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5648 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5649 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5650 memcpy(network->rates_ex,
5651 &priv->rates.supported_rates[network->rates_len],
5652 network->rates_ex_len);
5653 network->last_scanned = 0;
5654 network->flags = 0;
5655 network->last_associate = 0;
5656 network->time_stamp[0] = 0;
5657 network->time_stamp[1] = 0;
5658 network->beacon_interval = 100; /* Default */
5659 network->listen_interval = 10; /* Default */
5660 network->atim_window = 0; /* Default */
5661 network->wpa_ie_len = 0;
5662 network->rsn_ie_len = 0;
5665 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5667 struct ipw_tgi_tx_key key;
5669 if (!(priv->ieee->sec.flags & (1 << index)))
5670 return;
5672 key.key_id = index;
5673 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5674 key.security_type = type;
5675 key.station_index = 0; /* always 0 for BSS */
5676 key.flags = 0;
5677 /* 0 for new key; previous value of counter (after fatal error) */
5678 key.tx_counter[0] = 0;
5679 key.tx_counter[1] = 0;
5681 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5684 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5686 struct ipw_wep_key key;
5687 int i;
5689 key.cmd_id = DINO_CMD_WEP_KEY;
5690 key.seq_num = 0;
5692 /* Note: AES keys cannot be set multiple times.
5693 * Only set them the first time. */
5694 for (i = 0; i < 4; i++) {
5695 key.key_index = i | type;
5696 if (!(priv->ieee->sec.flags & (1 << i))) {
5697 key.key_size = 0;
5698 continue;
5701 key.key_size = priv->ieee->sec.key_sizes[i];
5702 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5704 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5708 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5710 if (priv->ieee->host_encrypt)
5711 return;
5713 switch (level) {
5714 case SEC_LEVEL_3:
5715 priv->sys_config.disable_unicast_decryption = 0;
5716 priv->ieee->host_decrypt = 0;
5717 break;
5718 case SEC_LEVEL_2:
5719 priv->sys_config.disable_unicast_decryption = 1;
5720 priv->ieee->host_decrypt = 1;
5721 break;
5722 case SEC_LEVEL_1:
5723 priv->sys_config.disable_unicast_decryption = 0;
5724 priv->ieee->host_decrypt = 0;
5725 break;
5726 case SEC_LEVEL_0:
5727 priv->sys_config.disable_unicast_decryption = 1;
5728 break;
5729 default:
5730 break;
5734 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5736 if (priv->ieee->host_encrypt)
5737 return;
5739 switch (level) {
5740 case SEC_LEVEL_3:
5741 priv->sys_config.disable_multicast_decryption = 0;
5742 break;
5743 case SEC_LEVEL_2:
5744 priv->sys_config.disable_multicast_decryption = 1;
5745 break;
5746 case SEC_LEVEL_1:
5747 priv->sys_config.disable_multicast_decryption = 0;
5748 break;
5749 case SEC_LEVEL_0:
5750 priv->sys_config.disable_multicast_decryption = 1;
5751 break;
5752 default:
5753 break;
5757 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5759 switch (priv->ieee->sec.level) {
5760 case SEC_LEVEL_3:
5761 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5762 ipw_send_tgi_tx_key(priv,
5763 DCT_FLAG_EXT_SECURITY_CCM,
5764 priv->ieee->sec.active_key);
5766 if (!priv->ieee->host_mc_decrypt)
5767 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5768 break;
5769 case SEC_LEVEL_2:
5770 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5771 ipw_send_tgi_tx_key(priv,
5772 DCT_FLAG_EXT_SECURITY_TKIP,
5773 priv->ieee->sec.active_key);
5774 break;
5775 case SEC_LEVEL_1:
5776 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5777 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5778 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5779 break;
5780 case SEC_LEVEL_0:
5781 default:
5782 break;
5786 static void ipw_adhoc_check(void *data)
5788 struct ipw_priv *priv = data;
5790 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5791 !(priv->config & CFG_ADHOC_PERSIST)) {
5792 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5793 IPW_DL_STATE | IPW_DL_ASSOC,
5794 "Missed beacon: %d - disassociate\n",
5795 priv->missed_adhoc_beacons);
5796 ipw_remove_current_network(priv);
5797 ipw_disassociate(priv);
5798 return;
5801 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5802 priv->assoc_request.beacon_interval);
5805 static void ipw_bg_adhoc_check(void *data)
5807 struct ipw_priv *priv = data;
5808 mutex_lock(&priv->mutex);
5809 ipw_adhoc_check(data);
5810 mutex_unlock(&priv->mutex);
5813 #ifdef CONFIG_IPW2200_DEBUG
5814 static void ipw_debug_config(struct ipw_priv *priv)
5816 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5817 "[CFG 0x%08X]\n", priv->config);
5818 if (priv->config & CFG_STATIC_CHANNEL)
5819 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5820 else
5821 IPW_DEBUG_INFO("Channel unlocked.\n");
5822 if (priv->config & CFG_STATIC_ESSID)
5823 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5824 escape_essid(priv->essid, priv->essid_len));
5825 else
5826 IPW_DEBUG_INFO("ESSID unlocked.\n");
5827 if (priv->config & CFG_STATIC_BSSID)
5828 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5829 MAC_ARG(priv->bssid));
5830 else
5831 IPW_DEBUG_INFO("BSSID unlocked.\n");
5832 if (priv->capability & CAP_PRIVACY_ON)
5833 IPW_DEBUG_INFO("PRIVACY on\n");
5834 else
5835 IPW_DEBUG_INFO("PRIVACY off\n");
5836 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5838 #else
5839 #define ipw_debug_config(x) do {} while (0)
5840 #endif
5842 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5844 /* TODO: Verify that this works... */
5845 struct ipw_fixed_rate fr = {
5846 .tx_rates = priv->rates_mask
5848 u32 reg;
5849 u16 mask = 0;
5851 /* Identify 'current FW band' and match it with the fixed
5852 * Tx rates */
5854 switch (priv->ieee->freq_band) {
5855 case IEEE80211_52GHZ_BAND: /* A only */
5856 /* IEEE_A */
5857 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5858 /* Invalid fixed rate mask */
5859 IPW_DEBUG_WX
5860 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5861 fr.tx_rates = 0;
5862 break;
5865 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5866 break;
5868 default: /* 2.4Ghz or Mixed */
5869 /* IEEE_B */
5870 if (mode == IEEE_B) {
5871 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5872 /* Invalid fixed rate mask */
5873 IPW_DEBUG_WX
5874 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5875 fr.tx_rates = 0;
5877 break;
5880 /* IEEE_G */
5881 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5882 IEEE80211_OFDM_RATES_MASK)) {
5883 /* Invalid fixed rate mask */
5884 IPW_DEBUG_WX
5885 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5886 fr.tx_rates = 0;
5887 break;
5890 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5891 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5892 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5895 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5896 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5897 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5900 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5901 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5902 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5905 fr.tx_rates |= mask;
5906 break;
5909 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5910 ipw_write_reg32(priv, reg, *(u32 *) & fr);
5913 static void ipw_abort_scan(struct ipw_priv *priv)
5915 int err;
5917 if (priv->status & STATUS_SCAN_ABORTING) {
5918 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5919 return;
5921 priv->status |= STATUS_SCAN_ABORTING;
5923 err = ipw_send_scan_abort(priv);
5924 if (err)
5925 IPW_DEBUG_HC("Request to abort scan failed.\n");
5928 static void ipw_add_scan_channels(struct ipw_priv *priv,
5929 struct ipw_scan_request_ext *scan,
5930 int scan_type)
5932 int channel_index = 0;
5933 const struct ieee80211_geo *geo;
5934 int i;
5936 geo = ieee80211_get_geo(priv->ieee);
5938 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5939 int start = channel_index;
5940 for (i = 0; i < geo->a_channels; i++) {
5941 if ((priv->status & STATUS_ASSOCIATED) &&
5942 geo->a[i].channel == priv->channel)
5943 continue;
5944 channel_index++;
5945 scan->channels_list[channel_index] = geo->a[i].channel;
5946 ipw_set_scan_type(scan, channel_index,
5947 geo->a[i].
5948 flags & IEEE80211_CH_PASSIVE_ONLY ?
5949 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
5950 scan_type);
5953 if (start != channel_index) {
5954 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
5955 (channel_index - start);
5956 channel_index++;
5960 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5961 int start = channel_index;
5962 if (priv->config & CFG_SPEED_SCAN) {
5963 int index;
5964 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
5965 /* nop out the list */
5966 [0] = 0
5969 u8 channel;
5970 while (channel_index < IPW_SCAN_CHANNELS) {
5971 channel =
5972 priv->speed_scan[priv->speed_scan_pos];
5973 if (channel == 0) {
5974 priv->speed_scan_pos = 0;
5975 channel = priv->speed_scan[0];
5977 if ((priv->status & STATUS_ASSOCIATED) &&
5978 channel == priv->channel) {
5979 priv->speed_scan_pos++;
5980 continue;
5983 /* If this channel has already been
5984 * added to the scan, break out of the
5985 * loop; it will be the first channel
5986 * in the next scan.

5988 if (channels[channel - 1] != 0)
5989 break;
5991 channels[channel - 1] = 1;
5992 priv->speed_scan_pos++;
5993 channel_index++;
5994 scan->channels_list[channel_index] = channel;
5995 index =
5996 ieee80211_channel_to_index(priv->ieee, channel);
5997 ipw_set_scan_type(scan, channel_index,
5998 geo->bg[index].
5999 flags &
6000 IEEE80211_CH_PASSIVE_ONLY ?
6001 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6002 : scan_type);
6004 } else {
6005 for (i = 0; i < geo->bg_channels; i++) {
6006 if ((priv->status & STATUS_ASSOCIATED) &&
6007 geo->bg[i].channel == priv->channel)
6008 continue;
6009 channel_index++;
6010 scan->channels_list[channel_index] =
6011 geo->bg[i].channel;
6012 ipw_set_scan_type(scan, channel_index,
6013 geo->bg[i].
6014 flags &
6015 IEEE80211_CH_PASSIVE_ONLY ?
6016 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6017 : scan_type);
6021 if (start != channel_index) {
6022 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6023 (channel_index - start);
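/* channels_list layout as built above: each band contributes a header
 * byte of ((IPW_A_MODE or IPW_B_MODE) << 6) | channel-count, followed
 * by that many channel numbers.  The header slot at 'start' is skipped
 * while the channels are appended and only filled in once the per-band
 * count is known. */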
6028 static int ipw_request_scan(struct ipw_priv *priv)
6030 struct ipw_scan_request_ext scan;
6031 int err = 0, scan_type;
6033 if (!(priv->status & STATUS_INIT) ||
6034 (priv->status & STATUS_EXIT_PENDING))
6035 return 0;
6037 mutex_lock(&priv->mutex);
6039 if (priv->status & STATUS_SCANNING) {
6040 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6041 priv->status |= STATUS_SCAN_PENDING;
6042 goto done;
6045 if (!(priv->status & STATUS_SCAN_FORCED) &&
6046 priv->status & STATUS_SCAN_ABORTING) {
6047 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6048 priv->status |= STATUS_SCAN_PENDING;
6049 goto done;
6052 if (priv->status & STATUS_RF_KILL_MASK) {
6053 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6054 priv->status |= STATUS_SCAN_PENDING;
6055 goto done;
6058 memset(&scan, 0, sizeof(scan));
6060 if (priv->config & CFG_SPEED_SCAN)
6061 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6062 cpu_to_le16(30);
6063 else
6064 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6065 cpu_to_le16(20);
6067 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6068 cpu_to_le16(20);
6069 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
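/* Dwell times (passed little-endian to the firmware): active broadcast
 * scans dwell 30 per channel when CFG_SPEED_SCAN is set and 20
 * otherwise, broadcast+directed scans dwell 20, and passive scans use
 * a full 120 dwell.  The unit is whatever the firmware expects for
 * dwell_time; the driver does not interpret these values itself. */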
6071 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6073 #ifdef CONFIG_IPW2200_MONITOR
6074 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6075 u8 channel;
6076 u8 band = 0;
6078 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6079 case IEEE80211_52GHZ_BAND:
6080 band = (u8) (IPW_A_MODE << 6) | 1;
6081 channel = priv->channel;
6082 break;
6084 case IEEE80211_24GHZ_BAND:
6085 band = (u8) (IPW_B_MODE << 6) | 1;
6086 channel = priv->channel;
6087 break;
6089 default:
6090 band = (u8) (IPW_B_MODE << 6) | 1;
6091 channel = 9;
6092 break;
6095 scan.channels_list[0] = band;
6096 scan.channels_list[1] = channel;
6097 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6099 /* NOTE: The card will sit on this channel for this time
6100 * period. Scan aborts are timing sensitive and frequently
6101 * result in firmware restarts. As such, it is best to
6102 * set a small dwell_time here and just keep re-issuing
6103 * scans. Otherwise fast channel hopping will not actually
6104 * hop channels.
6106 * TODO: Move SPEED SCAN support to all modes and bands */
6107 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6108 cpu_to_le16(2000);
6109 } else {
6110 #endif /* CONFIG_IPW2200_MONITOR */
6111 /* If we are roaming, then make this a directed scan for the
6112 * current network. Otherwise, ensure that every other scan
6113 * is a fast channel hop scan */
6114 if ((priv->status & STATUS_ROAMING)
6115 || (!(priv->status & STATUS_ASSOCIATED)
6116 && (priv->config & CFG_STATIC_ESSID)
6117 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6118 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6119 if (err) {
6120 IPW_DEBUG_HC("Attempt to send SSID command "
6121 "failed.\n");
6122 goto done;
6125 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6126 } else
6127 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6129 ipw_add_scan_channels(priv, &scan, scan_type);
6130 #ifdef CONFIG_IPW2200_MONITOR
6132 #endif
6134 err = ipw_send_scan_request_ext(priv, &scan);
6135 if (err) {
6136 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6137 goto done;
6140 priv->status |= STATUS_SCANNING;
6141 priv->status &= ~STATUS_SCAN_PENDING;
6142 queue_delayed_work(priv->workqueue, &priv->scan_check,
6143 IPW_SCAN_CHECK_WATCHDOG);
6144 done:
6145 mutex_unlock(&priv->mutex);
6146 return err;
6149 static void ipw_bg_abort_scan(void *data)
6151 struct ipw_priv *priv = data;
6152 mutex_lock(&priv->mutex);
6153 ipw_abort_scan(data);
6154 mutex_unlock(&priv->mutex);
6157 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6159 /* This is called when wpa_supplicant loads and closes the driver
6160 * interface. */
6161 priv->ieee->wpa_enabled = value;
6162 return 0;
6165 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6167 struct ieee80211_device *ieee = priv->ieee;
6168 struct ieee80211_security sec = {
6169 .flags = SEC_AUTH_MODE,
6171 int ret = 0;
6173 if (value & IW_AUTH_ALG_SHARED_KEY) {
6174 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6175 ieee->open_wep = 0;
6176 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6177 sec.auth_mode = WLAN_AUTH_OPEN;
6178 ieee->open_wep = 1;
6179 } else if (value & IW_AUTH_ALG_LEAP) {
6180 sec.auth_mode = WLAN_AUTH_LEAP;
6181 ieee->open_wep = 1;
6182 } else
6183 return -EINVAL;
6185 if (ieee->set_security)
6186 ieee->set_security(ieee->dev, &sec);
6187 else
6188 ret = -EOPNOTSUPP;
6190 return ret;
6193 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6194 int wpa_ie_len)
6196 /* make sure WPA is enabled */
6197 ipw_wpa_enable(priv, 1);
6199 ipw_disassociate(priv);
6202 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6203 char *capabilities, int length)
6205 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6207 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6208 capabilities);
6212 * WE-18 support
6215 /* SIOCSIWGENIE */
6216 static int ipw_wx_set_genie(struct net_device *dev,
6217 struct iw_request_info *info,
6218 union iwreq_data *wrqu, char *extra)
6220 struct ipw_priv *priv = ieee80211_priv(dev);
6221 struct ieee80211_device *ieee = priv->ieee;
6222 u8 *buf;
6223 int err = 0;
6225 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6226 (wrqu->data.length && extra == NULL))
6227 return -EINVAL;
6229 //mutex_lock(&priv->mutex);
6231 //if (!ieee->wpa_enabled) {
6232 // err = -EOPNOTSUPP;
6233 // goto out;
6236 if (wrqu->data.length) {
6237 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6238 if (buf == NULL) {
6239 err = -ENOMEM;
6240 goto out;
6243 memcpy(buf, extra, wrqu->data.length);
6244 kfree(ieee->wpa_ie);
6245 ieee->wpa_ie = buf;
6246 ieee->wpa_ie_len = wrqu->data.length;
6247 } else {
6248 kfree(ieee->wpa_ie);
6249 ieee->wpa_ie = NULL;
6250 ieee->wpa_ie_len = 0;
6253 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6254 out:
6255 //mutex_unlock(&priv->mutex);
6256 return err;
6259 /* SIOCGIWGENIE */
6260 static int ipw_wx_get_genie(struct net_device *dev,
6261 struct iw_request_info *info,
6262 union iwreq_data *wrqu, char *extra)
6264 struct ipw_priv *priv = ieee80211_priv(dev);
6265 struct ieee80211_device *ieee = priv->ieee;
6266 int err = 0;
6268 //mutex_lock(&priv->mutex);
6270 //if (!ieee->wpa_enabled) {
6271 // err = -EOPNOTSUPP;
6272 // goto out;
6275 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6276 wrqu->data.length = 0;
6277 goto out;
6280 if (wrqu->data.length < ieee->wpa_ie_len) {
6281 err = -E2BIG;
6282 goto out;
6285 wrqu->data.length = ieee->wpa_ie_len;
6286 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6288 out:
6289 //mutex_unlock(&priv->mutex);
6290 return err;
6293 static int wext_cipher2level(int cipher)
6295 switch (cipher) {
6296 case IW_AUTH_CIPHER_NONE:
6297 return SEC_LEVEL_0;
6298 case IW_AUTH_CIPHER_WEP40:
6299 case IW_AUTH_CIPHER_WEP104:
6300 return SEC_LEVEL_1;
6301 case IW_AUTH_CIPHER_TKIP:
6302 return SEC_LEVEL_2;
6303 case IW_AUTH_CIPHER_CCMP:
6304 return SEC_LEVEL_3;
6305 default:
6306 return -1;
6310 /* SIOCSIWAUTH */
6311 static int ipw_wx_set_auth(struct net_device *dev,
6312 struct iw_request_info *info,
6313 union iwreq_data *wrqu, char *extra)
6315 struct ipw_priv *priv = ieee80211_priv(dev);
6316 struct ieee80211_device *ieee = priv->ieee;
6317 struct iw_param *param = &wrqu->param;
6318 struct ieee80211_crypt_data *crypt;
6319 unsigned long flags;
6320 int ret = 0;
6322 switch (param->flags & IW_AUTH_INDEX) {
6323 case IW_AUTH_WPA_VERSION:
6324 break;
6325 case IW_AUTH_CIPHER_PAIRWISE:
6326 ipw_set_hw_decrypt_unicast(priv,
6327 wext_cipher2level(param->value));
6328 break;
6329 case IW_AUTH_CIPHER_GROUP:
6330 ipw_set_hw_decrypt_multicast(priv,
6331 wext_cipher2level(param->value));
6332 break;
6333 case IW_AUTH_KEY_MGMT:
6335 * ipw2200 does not use these parameters
6337 break;
6339 case IW_AUTH_TKIP_COUNTERMEASURES:
6340 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6341 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6342 break;
6344 flags = crypt->ops->get_flags(crypt->priv);
6346 if (param->value)
6347 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6348 else
6349 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6351 crypt->ops->set_flags(flags, crypt->priv);
6353 break;
6355 case IW_AUTH_DROP_UNENCRYPTED:{
6356 /* HACK:
6358 * wpa_supplicant calls set_wpa_enabled when the driver
6359 * is loaded and unloaded, regardless of whether WPA is being
6360 * used. No other calls are made which can be used to
6361 * determine whether encryption will be used prior to
6362 * association. If encryption is not being
6363 * used, drop_unencrypted is set to false, else true -- we
6364 * can use this to determine if the CAP_PRIVACY_ON bit should
6365 * be set.
6367 struct ieee80211_security sec = {
6368 .flags = SEC_ENABLED,
6369 .enabled = param->value,
6371 priv->ieee->drop_unencrypted = param->value;
6372 /* We only change SEC_LEVEL for open mode. Others
6373 * are set by ipw_wpa_set_encryption.
6375 if (!param->value) {
6376 sec.flags |= SEC_LEVEL;
6377 sec.level = SEC_LEVEL_0;
6378 } else {
6379 sec.flags |= SEC_LEVEL;
6380 sec.level = SEC_LEVEL_1;
6382 if (priv->ieee->set_security)
6383 priv->ieee->set_security(priv->ieee->dev, &sec);
6384 break;
6387 case IW_AUTH_80211_AUTH_ALG:
6388 ret = ipw_wpa_set_auth_algs(priv, param->value);
6389 break;
6391 case IW_AUTH_WPA_ENABLED:
6392 ret = ipw_wpa_enable(priv, param->value);
6393 break;
6395 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6396 ieee->ieee802_1x = param->value;
6397 break;
6399 //case IW_AUTH_ROAMING_CONTROL:
6400 case IW_AUTH_PRIVACY_INVOKED:
6401 ieee->privacy_invoked = param->value;
6402 break;
6404 default:
6405 return -EOPNOTSUPP;
6407 return ret;
6410 /* SIOCGIWAUTH */
6411 static int ipw_wx_get_auth(struct net_device *dev,
6412 struct iw_request_info *info,
6413 union iwreq_data *wrqu, char *extra)
6415 struct ipw_priv *priv = ieee80211_priv(dev);
6416 struct ieee80211_device *ieee = priv->ieee;
6417 struct ieee80211_crypt_data *crypt;
6418 struct iw_param *param = &wrqu->param;
6419 int ret = 0;
6421 switch (param->flags & IW_AUTH_INDEX) {
6422 case IW_AUTH_WPA_VERSION:
6423 case IW_AUTH_CIPHER_PAIRWISE:
6424 case IW_AUTH_CIPHER_GROUP:
6425 case IW_AUTH_KEY_MGMT:
6427 * wpa_supplicant will control these internally
6429 ret = -EOPNOTSUPP;
6430 break;
6432 case IW_AUTH_TKIP_COUNTERMEASURES:
6433 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6434 if (!crypt || !crypt->ops->get_flags)
6435 break;
6437 param->value = (crypt->ops->get_flags(crypt->priv) &
6438 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6440 break;
6442 case IW_AUTH_DROP_UNENCRYPTED:
6443 param->value = ieee->drop_unencrypted;
6444 break;
6446 case IW_AUTH_80211_AUTH_ALG:
6447 param->value = ieee->sec.auth_mode;
6448 break;
6450 case IW_AUTH_WPA_ENABLED:
6451 param->value = ieee->wpa_enabled;
6452 break;
6454 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6455 param->value = ieee->ieee802_1x;
6456 break;
6458 case IW_AUTH_ROAMING_CONTROL:
6459 case IW_AUTH_PRIVACY_INVOKED:
6460 param->value = ieee->privacy_invoked;
6461 break;
6463 default:
6464 return -EOPNOTSUPP;
6466 return 0;
6469 /* SIOCSIWENCODEEXT */
6470 static int ipw_wx_set_encodeext(struct net_device *dev,
6471 struct iw_request_info *info,
6472 union iwreq_data *wrqu, char *extra)
6474 struct ipw_priv *priv = ieee80211_priv(dev);
6475 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6477 if (hwcrypto) {
6478 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6479 /* IPW HW can't build TKIP MIC,
6480 host decryption still needed */
6481 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6482 priv->ieee->host_mc_decrypt = 1;
6483 else {
6484 priv->ieee->host_encrypt = 0;
6485 priv->ieee->host_encrypt_msdu = 1;
6486 priv->ieee->host_decrypt = 1;
6488 } else {
6489 priv->ieee->host_encrypt = 0;
6490 priv->ieee->host_encrypt_msdu = 0;
6491 priv->ieee->host_decrypt = 0;
6492 priv->ieee->host_mc_decrypt = 0;
6496 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
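/* Summary of the host_* flags chosen above when hwcrypto is enabled:
 * the firmware cannot generate the TKIP Michael MIC, so pairwise TKIP
 * keeps MSDU encryption and frame decryption on the host, and TKIP
 * group keys keep multicast decryption on the host; for all other
 * algorithms every encrypt/decrypt step is left to the hardware. */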
6499 /* SIOCGIWENCODEEXT */
6500 static int ipw_wx_get_encodeext(struct net_device *dev,
6501 struct iw_request_info *info,
6502 union iwreq_data *wrqu, char *extra)
6504 struct ipw_priv *priv = ieee80211_priv(dev);
6505 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6508 /* SIOCSIWMLME */
6509 static int ipw_wx_set_mlme(struct net_device *dev,
6510 struct iw_request_info *info,
6511 union iwreq_data *wrqu, char *extra)
6513 struct ipw_priv *priv = ieee80211_priv(dev);
6514 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6515 u16 reason;
6517 reason = cpu_to_le16(mlme->reason_code);
6519 switch (mlme->cmd) {
6520 case IW_MLME_DEAUTH:
6521 // silently ignore
6522 break;
6524 case IW_MLME_DISASSOC:
6525 ipw_disassociate(priv);
6526 break;
6528 default:
6529 return -EOPNOTSUPP;
6531 return 0;
6534 #ifdef CONFIG_IPW_QOS
6536 /* QoS */
6538 * get the modulation type of the current network or
6539 * the card's current mode
6541 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6543 u8 mode = 0;
6545 if (priv->status & STATUS_ASSOCIATED) {
6546 unsigned long flags;
6548 spin_lock_irqsave(&priv->ieee->lock, flags);
6549 mode = priv->assoc_network->mode;
6550 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6551 } else {
6552 mode = priv->ieee->mode;
6554 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6555 return mode;
6559 * Handle management frame beacon and probe response
6561 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6562 int active_network,
6563 struct ieee80211_network *network)
6565 u32 size = sizeof(struct ieee80211_qos_parameters);
6567 if (network->capability & WLAN_CAPABILITY_IBSS)
6568 network->qos_data.active = network->qos_data.supported;
6570 if (network->flags & NETWORK_HAS_QOS_MASK) {
6571 if (active_network &&
6572 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6573 network->qos_data.active = network->qos_data.supported;
6575 if ((network->qos_data.active == 1) && (active_network == 1) &&
6576 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6577 (network->qos_data.old_param_count !=
6578 network->qos_data.param_count)) {
6579 network->qos_data.old_param_count =
6580 network->qos_data.param_count;
6581 schedule_work(&priv->qos_activate);
6582 IPW_DEBUG_QOS("QoS parameters change call "
6583 "qos_activate\n");
6585 } else {
6586 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6587 memcpy(&network->qos_data.parameters,
6588 &def_parameters_CCK, size);
6589 else
6590 memcpy(&network->qos_data.parameters,
6591 &def_parameters_OFDM, size);
6593 if ((network->qos_data.active == 1) && (active_network == 1)) {
6594 IPW_DEBUG_QOS("QoS was disabled; calling qos_activate\n");
6595 schedule_work(&priv->qos_activate);
6598 network->qos_data.active = 0;
6599 network->qos_data.supported = 0;
6601 if ((priv->status & STATUS_ASSOCIATED) &&
6602 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6603 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6604 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6605 !(network->flags & NETWORK_EMPTY_ESSID))
6606 if ((network->ssid_len ==
6607 priv->assoc_network->ssid_len) &&
6608 !memcmp(network->ssid,
6609 priv->assoc_network->ssid,
6610 network->ssid_len)) {
6611 queue_work(priv->workqueue,
6612 &priv->merge_networks);
6616 return 0;
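/* The nested checks above implement IBSS cell merging: while
 * associated in ad-hoc mode, a frame from a different BSSID that
 * advertises the same non-empty SSID in an IBSS schedules the
 * merge_networks work so the two cells can converge on one BSSID. */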
6620 * This function sets up the firmware to support QoS. It sends
6621 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6623 static int ipw_qos_activate(struct ipw_priv *priv,
6624 struct ieee80211_qos_data *qos_network_data)
6626 int err;
6627 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6628 struct ieee80211_qos_parameters *active_one = NULL;
6629 u32 size = sizeof(struct ieee80211_qos_parameters);
6630 u32 burst_duration;
6631 int i;
6632 u8 type;
6634 type = ipw_qos_current_mode(priv);
6636 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6637 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6638 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6639 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6641 if (qos_network_data == NULL) {
6642 if (type == IEEE_B) {
6643 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6644 active_one = &def_parameters_CCK;
6645 } else
6646 active_one = &def_parameters_OFDM;
6648 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6649 burst_duration = ipw_qos_get_burst_duration(priv);
6650 for (i = 0; i < QOS_QUEUE_NUM; i++)
6651 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6652 (u16) burst_duration;
6653 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6654 if (type == IEEE_B) {
6655 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6656 type);
6657 if (priv->qos_data.qos_enable == 0)
6658 active_one = &def_parameters_CCK;
6659 else
6660 active_one = priv->qos_data.def_qos_parm_CCK;
6661 } else {
6662 if (priv->qos_data.qos_enable == 0)
6663 active_one = &def_parameters_OFDM;
6664 else
6665 active_one = priv->qos_data.def_qos_parm_OFDM;
6667 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6668 } else {
6669 unsigned long flags;
6670 int active;
6672 spin_lock_irqsave(&priv->ieee->lock, flags);
6673 active_one = &(qos_network_data->parameters);
6674 qos_network_data->old_param_count =
6675 qos_network_data->param_count;
6676 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6677 active = qos_network_data->supported;
6678 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6680 if (active == 0) {
6681 burst_duration = ipw_qos_get_burst_duration(priv);
6682 for (i = 0; i < QOS_QUEUE_NUM; i++)
6683 qos_parameters[QOS_PARAM_SET_ACTIVE].
6684 tx_op_limit[i] = (u16) burst_duration;
6688 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6689 err = ipw_send_qos_params_command(priv,
6690 (struct ieee80211_qos_parameters *)
6691 &(qos_parameters[0]));
6692 if (err)
6693 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6695 return err;
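/* The array built above carries three parameter blocks: the CCK
 * defaults, the OFDM defaults and the currently active set
 * (QOS_PARAM_SET_ACTIVE), which is taken from the defaults, the
 * ad-hoc settings or the associated network's parameters depending on
 * mode.  ipw_send_qos_params_command() pushes all three to the
 * firmware in a single IPW_CMD_QOS_PARAMETERS PDU. */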
6699 * send IPW_CMD_WME_INFO to the firmware
6701 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6703 int ret = 0;
6704 struct ieee80211_qos_information_element qos_info;
6706 if (priv == NULL)
6707 return -1;
6709 qos_info.elementID = QOS_ELEMENT_ID;
6710 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6712 qos_info.version = QOS_VERSION_1;
6713 qos_info.ac_info = 0;
6715 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6716 qos_info.qui_type = QOS_OUI_TYPE;
6717 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6719 ret = ipw_send_qos_info_command(priv, &qos_info);
6720 if (ret != 0) {
6721 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6723 return ret;
6727 * Set the QoS parameter with the association request structure
6729 static int ipw_qos_association(struct ipw_priv *priv,
6730 struct ieee80211_network *network)
6732 int err = 0;
6733 struct ieee80211_qos_data *qos_data = NULL;
6734 struct ieee80211_qos_data ibss_data = {
6735 .supported = 1,
6736 .active = 1,
6739 switch (priv->ieee->iw_mode) {
6740 case IW_MODE_ADHOC:
6741 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6743 qos_data = &ibss_data;
6744 break;
6746 case IW_MODE_INFRA:
6747 qos_data = &network->qos_data;
6748 break;
6750 default:
6751 BUG();
6752 break;
6755 err = ipw_qos_activate(priv, qos_data);
6756 if (err) {
6757 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6758 return err;
6761 if (priv->qos_data.qos_enable && qos_data->supported) {
6762 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6763 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6764 return ipw_qos_set_info_element(priv);
6767 return 0;
6771 * Handle the beacon responses. If the QoS settings of the network
6772 * differ from the settings in use since association, adjust the
6773 * QoS settings.
6775 static int ipw_qos_association_resp(struct ipw_priv *priv,
6776 struct ieee80211_network *network)
6778 int ret = 0;
6779 unsigned long flags;
6780 u32 size = sizeof(struct ieee80211_qos_parameters);
6781 int set_qos_param = 0;
6783 if ((priv == NULL) || (network == NULL) ||
6784 (priv->assoc_network == NULL))
6785 return ret;
6787 if (!(priv->status & STATUS_ASSOCIATED))
6788 return ret;
6790 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6791 return ret;
6793 spin_lock_irqsave(&priv->ieee->lock, flags);
6794 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6795 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6796 sizeof(struct ieee80211_qos_data));
6797 priv->assoc_network->qos_data.active = 1;
6798 if ((network->qos_data.old_param_count !=
6799 network->qos_data.param_count)) {
6800 set_qos_param = 1;
6801 network->qos_data.old_param_count =
6802 network->qos_data.param_count;
6805 } else {
6806 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6807 memcpy(&priv->assoc_network->qos_data.parameters,
6808 &def_parameters_CCK, size);
6809 else
6810 memcpy(&priv->assoc_network->qos_data.parameters,
6811 &def_parameters_OFDM, size);
6812 priv->assoc_network->qos_data.active = 0;
6813 priv->assoc_network->qos_data.supported = 0;
6814 set_qos_param = 1;
6817 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6819 if (set_qos_param == 1)
6820 schedule_work(&priv->qos_activate);
6822 return ret;
6825 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6827 u32 ret = 0;
6829 if ((priv == NULL))
6830 return 0;
6832 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6833 ret = priv->qos_data.burst_duration_CCK;
6834 else
6835 ret = priv->qos_data.burst_duration_OFDM;
6837 return ret;
6841 * Initialize the global QoS settings
6843 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6844 int burst_enable, u32 burst_duration_CCK,
6845 u32 burst_duration_OFDM)
6847 priv->qos_data.qos_enable = enable;
6849 if (priv->qos_data.qos_enable) {
6850 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6851 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6852 IPW_DEBUG_QOS("QoS is enabled\n");
6853 } else {
6854 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6855 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6856 IPW_DEBUG_QOS("QoS is not enabled\n");
6859 priv->qos_data.burst_enable = burst_enable;
6861 if (burst_enable) {
6862 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6863 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6864 } else {
6865 priv->qos_data.burst_duration_CCK = 0;
6866 priv->qos_data.burst_duration_OFDM = 0;
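/* Even with QoS disabled, def_qos_parm_CCK/OFDM are still pointed at
 * the plain def_parameters_* sets so later code can dereference them
 * unconditionally; the burst durations are simply zeroed whenever
 * bursting is turned off. */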
6871 * map the packet priority to the right TX Queue
6873 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6875 if (priority > 7 || !priv->qos_data.qos_enable)
6876 priority = 0;
6878 return from_priority_to_tx_queue[priority] - 1;
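/* from_priority_to_tx_queue[] (defined elsewhere in this driver) maps
 * the QoS packet priority (0-7) onto a firmware TX queue; the table
 * stores 1-based queue numbers, hence the "- 1".  Out-of-range
 * priorities, or running without QoS, fall back to priority 0. */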
6881 static int ipw_is_qos_active(struct net_device *dev,
6882 struct sk_buff *skb)
6884 struct ipw_priv *priv = ieee80211_priv(dev);
6885 struct ieee80211_qos_data *qos_data = NULL;
6886 int active, supported;
6887 u8 *daddr = skb->data + ETH_ALEN;
6888 int unicast = !is_multicast_ether_addr(daddr);
6890 if (!(priv->status & STATUS_ASSOCIATED))
6891 return 0;
6893 qos_data = &priv->assoc_network->qos_data;
6895 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6896 if (unicast == 0)
6897 qos_data->active = 0;
6898 else
6899 qos_data->active = qos_data->supported;
6901 active = qos_data->active;
6902 supported = qos_data->supported;
6903 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6904 "unicast %d\n",
6905 priv->qos_data.qos_enable, active, supported, unicast);
6906 if (active && priv->qos_data.qos_enable)
6907 return 1;
6909 return 0;
6913 * add QoS parameter to the TX command
6915 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6916 u16 priority,
6917 struct tfd_data *tfd)
6919 int tx_queue_id = 0;
6922 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
6923 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6925 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
6926 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6927 tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
6929 return 0;
6933 * background support to run QoS activate functionality
6935 static void ipw_bg_qos_activate(void *data)
6937 struct ipw_priv *priv = data;
6939 if (priv == NULL)
6940 return;
6942 mutex_lock(&priv->mutex);
6944 if (priv->status & STATUS_ASSOCIATED)
6945 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6947 mutex_unlock(&priv->mutex);
6950 static int ipw_handle_probe_response(struct net_device *dev,
6951 struct ieee80211_probe_response *resp,
6952 struct ieee80211_network *network)
6954 struct ipw_priv *priv = ieee80211_priv(dev);
6955 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6956 (network == priv->assoc_network));
6958 ipw_qos_handle_probe_response(priv, active_network, network);
6960 return 0;
6963 static int ipw_handle_beacon(struct net_device *dev,
6964 struct ieee80211_beacon *resp,
6965 struct ieee80211_network *network)
6967 struct ipw_priv *priv = ieee80211_priv(dev);
6968 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6969 (network == priv->assoc_network));
6971 ipw_qos_handle_probe_response(priv, active_network, network);
6973 return 0;
6976 static int ipw_handle_assoc_response(struct net_device *dev,
6977 struct ieee80211_assoc_response *resp,
6978 struct ieee80211_network *network)
6980 struct ipw_priv *priv = ieee80211_priv(dev);
6981 ipw_qos_association_resp(priv, network);
6982 return 0;
6985 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
6986 *qos_param)
6988 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
6989 sizeof(*qos_param) * 3, qos_param);
6992 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
6993 *qos_param)
6995 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
6996 qos_param);
6999 #endif /* CONFIG_IPW_QOS */
7001 static int ipw_associate_network(struct ipw_priv *priv,
7002 struct ieee80211_network *network,
7003 struct ipw_supported_rates *rates, int roaming)
7005 int err;
7007 if (priv->config & CFG_FIXED_RATE)
7008 ipw_set_fixed_rate(priv, network->mode);
7010 if (!(priv->config & CFG_STATIC_ESSID)) {
7011 priv->essid_len = min(network->ssid_len,
7012 (u8) IW_ESSID_MAX_SIZE);
7013 memcpy(priv->essid, network->ssid, priv->essid_len);
7016 network->last_associate = jiffies;
7018 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7019 priv->assoc_request.channel = network->channel;
7020 priv->assoc_request.auth_key = 0;
7022 if ((priv->capability & CAP_PRIVACY_ON) &&
7023 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7024 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7025 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7027 if (priv->ieee->sec.level == SEC_LEVEL_1)
7028 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7030 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7031 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7032 priv->assoc_request.auth_type = AUTH_LEAP;
7033 else
7034 priv->assoc_request.auth_type = AUTH_OPEN;
7036 if (priv->ieee->wpa_ie_len) {
7037 priv->assoc_request.policy_support = 0x02; /* RSN active */
7038 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7039 priv->ieee->wpa_ie_len);
7043 * It is valid for our ieee device to support multiple modes, but
7044 * when it comes to associating to a given network we have to choose
7045 * just one mode.
7047 if (network->mode & priv->ieee->mode & IEEE_A)
7048 priv->assoc_request.ieee_mode = IPW_A_MODE;
7049 else if (network->mode & priv->ieee->mode & IEEE_G)
7050 priv->assoc_request.ieee_mode = IPW_G_MODE;
7051 else if (network->mode & priv->ieee->mode & IEEE_B)
7052 priv->assoc_request.ieee_mode = IPW_B_MODE;
7054 priv->assoc_request.capability = network->capability;
7055 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7056 && !(priv->config & CFG_PREAMBLE_LONG)) {
7057 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7058 } else {
7059 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7061 /* Clear the short preamble if we won't be supporting it */
7062 priv->assoc_request.capability &=
7063 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7066 /* Clear capability bits that aren't used in Ad Hoc */
7067 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7068 priv->assoc_request.capability &=
7069 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7071 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7072 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7073 roaming ? "Rea" : "A",
7074 escape_essid(priv->essid, priv->essid_len),
7075 network->channel,
7076 ipw_modes[priv->assoc_request.ieee_mode],
7077 rates->num_rates,
7078 (priv->assoc_request.preamble_length ==
7079 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7080 network->capability &
7081 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7082 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7083 priv->capability & CAP_PRIVACY_ON ?
7084 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7085 "(open)") : "",
7086 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7087 priv->capability & CAP_PRIVACY_ON ?
7088 '1' + priv->ieee->sec.active_key : '.',
7089 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7091 priv->assoc_request.beacon_interval = network->beacon_interval;
7092 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7093 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7094 priv->assoc_request.assoc_type = HC_IBSS_START;
7095 priv->assoc_request.assoc_tsf_msw = 0;
7096 priv->assoc_request.assoc_tsf_lsw = 0;
7097 } else {
7098 if (unlikely(roaming))
7099 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7100 else
7101 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7102 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7103 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7106 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7108 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7109 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7110 priv->assoc_request.atim_window = network->atim_window;
7111 } else {
7112 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7113 priv->assoc_request.atim_window = 0;
7116 priv->assoc_request.listen_interval = network->listen_interval;
7118 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7119 if (err) {
7120 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7121 return err;
7124 rates->ieee_mode = priv->assoc_request.ieee_mode;
7125 rates->purpose = IPW_RATE_CONNECT;
7126 ipw_send_supported_rates(priv, rates);
7128 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7129 priv->sys_config.dot11g_auto_detection = 1;
7130 else
7131 priv->sys_config.dot11g_auto_detection = 0;
7133 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7134 priv->sys_config.answer_broadcast_ssid_probe = 1;
7135 else
7136 priv->sys_config.answer_broadcast_ssid_probe = 0;
7138 err = ipw_send_system_config(priv, &priv->sys_config);
7139 if (err) {
7140 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7141 return err;
7144 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7145 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7146 if (err) {
7147 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7148 return err;
7152 * If preemption is enabled, it is possible for the association
7153 * to complete before we return from ipw_send_associate. Therefore
7154 * we have to be sure to update our private data first.
7156 priv->channel = network->channel;
7157 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7158 priv->status |= STATUS_ASSOCIATING;
7159 priv->status &= ~STATUS_SECURITY_UPDATED;
7161 priv->assoc_network = network;
7163 #ifdef CONFIG_IPW_QOS
7164 ipw_qos_association(priv, network);
7165 #endif
7167 err = ipw_send_associate(priv, &priv->assoc_request);
7168 if (err) {
7169 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7170 return err;
7173 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7174 escape_essid(priv->essid, priv->essid_len),
7175 MAC_ARG(priv->bssid));
7177 return 0;
7180 static void ipw_roam(void *data)
7182 struct ipw_priv *priv = data;
7183 struct ieee80211_network *network = NULL;
7184 struct ipw_network_match match = {
7185 .network = priv->assoc_network
7188 /* The roaming process is as follows:
7190 * 1. Missed beacon threshold triggers the roaming process by
7191 * setting the status ROAM bit and requesting a scan.
7192 * 2. When the scan completes, it schedules the ROAM work
7193 * 3. The ROAM work looks at all of the known networks for one that
7194 * is a better network than the currently associated. If none
7195 * found, the ROAM process is over (ROAM bit cleared)
7196 * 4. If a better network is found, a disassociation request is
7197 * sent.
7198 * 5. When the disassociation completes, the roam work is again
7199 * scheduled. The second time through, the driver is no longer
7200 * associated, and the newly selected network is sent an
7201 * association request.
7202 * 6. At this point, the roaming process is complete and the ROAM
7203 * status bit is cleared.
7206 /* If we are no longer associated, and the roaming bit is no longer
7207 * set, then we are not actively roaming, so just return */
7208 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7209 return;
7211 if (priv->status & STATUS_ASSOCIATED) {
7212 /* First pass through ROAM process -- look for a better
7213 * network */
7214 unsigned long flags;
7215 u8 rssi = priv->assoc_network->stats.rssi;
7216 priv->assoc_network->stats.rssi = -128;
7217 spin_lock_irqsave(&priv->ieee->lock, flags);
7218 list_for_each_entry(network, &priv->ieee->network_list, list) {
7219 if (network != priv->assoc_network)
7220 ipw_best_network(priv, &match, network, 1);
7222 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7223 priv->assoc_network->stats.rssi = rssi;
7225 if (match.network == priv->assoc_network) {
7226 IPW_DEBUG_ASSOC("No better APs in this network to "
7227 "roam to.\n");
7228 priv->status &= ~STATUS_ROAMING;
7229 ipw_debug_config(priv);
7230 return;
7233 ipw_send_disassociate(priv, 1);
7234 priv->assoc_network = match.network;
7236 return;
7239 /* Second pass through ROAM process -- request association */
7240 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7241 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7242 priv->status &= ~STATUS_ROAMING;
7245 static void ipw_bg_roam(void *data)
7247 struct ipw_priv *priv = data;
7248 mutex_lock(&priv->mutex);
7249 ipw_roam(data);
7250 mutex_unlock(&priv->mutex);
7253 static int ipw_associate(void *data)
7255 struct ipw_priv *priv = data;
7257 struct ieee80211_network *network = NULL;
7258 struct ipw_network_match match = {
7259 .network = NULL
7261 struct ipw_supported_rates *rates;
7262 struct list_head *element;
7263 unsigned long flags;
7265 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7266 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7267 return 0;
7270 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7271 IPW_DEBUG_ASSOC("Not attempting association (already in "
7272 "progress)\n");
7273 return 0;
7276 if (priv->status & STATUS_DISASSOCIATING) {
7277 IPW_DEBUG_ASSOC("Not attempting association (in "
7278 "disassociating)\n ");
7279 queue_work(priv->workqueue, &priv->associate);
7280 return 0;
7283 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7284 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7285 "initialized)\n");
7286 return 0;
7289 if (!(priv->config & CFG_ASSOCIATE) &&
7290 !(priv->config & (CFG_STATIC_ESSID |
7291 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7292 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7293 return 0;
7296 /* Protect our use of the network_list */
7297 spin_lock_irqsave(&priv->ieee->lock, flags);
7298 list_for_each_entry(network, &priv->ieee->network_list, list)
7299 ipw_best_network(priv, &match, network, 0);
7301 network = match.network;
7302 rates = &match.rates;
7304 if (network == NULL &&
7305 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7306 priv->config & CFG_ADHOC_CREATE &&
7307 priv->config & CFG_STATIC_ESSID &&
7308 priv->config & CFG_STATIC_CHANNEL &&
7309 !list_empty(&priv->ieee->network_free_list)) {
7310 element = priv->ieee->network_free_list.next;
7311 network = list_entry(element, struct ieee80211_network, list);
7312 ipw_adhoc_create(priv, network);
7313 rates = &priv->rates;
7314 list_del(element);
7315 list_add_tail(&network->list, &priv->ieee->network_list);
7317 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7319 /* If we reached the end of the list, then we don't have any valid
7320 * matching APs */
7321 if (!network) {
7322 ipw_debug_config(priv);
7324 if (!(priv->status & STATUS_SCANNING)) {
7325 if (!(priv->config & CFG_SPEED_SCAN))
7326 queue_delayed_work(priv->workqueue,
7327 &priv->request_scan,
7328 SCAN_INTERVAL);
7329 else
7330 queue_work(priv->workqueue,
7331 &priv->request_scan);
7334 return 0;
7337 ipw_associate_network(priv, network, rates, 0);
7339 return 1;
7342 static void ipw_bg_associate(void *data)
7344 struct ipw_priv *priv = data;
7345 mutex_lock(&priv->mutex);
7346 ipw_associate(data);
7347 mutex_unlock(&priv->mutex);
7350 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7351 struct sk_buff *skb)
7353 struct ieee80211_hdr *hdr;
7354 u16 fc;
7356 hdr = (struct ieee80211_hdr *)skb->data;
7357 fc = le16_to_cpu(hdr->frame_ctl);
7358 if (!(fc & IEEE80211_FCTL_PROTECTED))
7359 return;
7361 fc &= ~IEEE80211_FCTL_PROTECTED;
7362 hdr->frame_ctl = cpu_to_le16(fc);
7363 switch (priv->ieee->sec.level) {
7364 case SEC_LEVEL_3:
7365 /* Remove CCMP HDR */
7366 memmove(skb->data + IEEE80211_3ADDR_LEN,
7367 skb->data + IEEE80211_3ADDR_LEN + 8,
7368 skb->len - IEEE80211_3ADDR_LEN - 8);
7369 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7370 break;
7371 case SEC_LEVEL_2:
7372 break;
7373 case SEC_LEVEL_1:
7374 /* Remove IV */
7375 memmove(skb->data + IEEE80211_3ADDR_LEN,
7376 skb->data + IEEE80211_3ADDR_LEN + 4,
7377 skb->len - IEEE80211_3ADDR_LEN - 4);
7378 skb_trim(skb, skb->len - 8); /* IV + ICV */
7379 break;
7380 case SEC_LEVEL_0:
7381 break;
7382 default:
7383 printk(KERN_ERR "Unknown security level %d\n",
7384 priv->ieee->sec.level);
7385 break;
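/* When the hardware has already decrypted a frame it leaves the
 * PROTECTED flag and the security header/trailer in place, so this
 * helper strips them before the frame reaches the 802.11 stack: the
 * 8-byte CCMP header plus 8-byte MIC for SEC_LEVEL_3, the 4-byte IV
 * plus 4-byte ICV for WEP (SEC_LEVEL_1); SEC_LEVEL_2 (TKIP) frames are
 * left untouched here. */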
7389 static void ipw_handle_data_packet(struct ipw_priv *priv,
7390 struct ipw_rx_mem_buffer *rxb,
7391 struct ieee80211_rx_stats *stats)
7393 struct ieee80211_hdr_4addr *hdr;
7394 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7396 /* We received data from the HW, so stop the watchdog */
7397 priv->net_dev->trans_start = jiffies;
7399 /* We only process data packets if the
7400 * interface is open */
7401 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7402 skb_tailroom(rxb->skb))) {
7403 priv->ieee->stats.rx_errors++;
7404 priv->wstats.discard.misc++;
7405 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7406 return;
7407 } else if (unlikely(!netif_running(priv->net_dev))) {
7408 priv->ieee->stats.rx_dropped++;
7409 priv->wstats.discard.misc++;
7410 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7411 return;
7414 /* Advance skb->data to the start of the actual payload */
7415 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7417 /* Set the size of the skb to the size of the frame */
7418 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7420 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7422 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7423 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7424 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7425 (is_multicast_ether_addr(hdr->addr1) ?
7426 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7427 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7429 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7430 priv->ieee->stats.rx_errors++;
7431 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7432 rxb->skb = NULL;
7433 __ipw_led_activity_on(priv);
7437 #ifdef CONFIG_IEEE80211_RADIOTAP
7438 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7439 struct ipw_rx_mem_buffer *rxb,
7440 struct ieee80211_rx_stats *stats)
7442 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7443 struct ipw_rx_frame *frame = &pkt->u.frame;
7445 /* initial pull of some data */
7446 u16 received_channel = frame->received_channel;
7447 u8 antennaAndPhy = frame->antennaAndPhy;
7448 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7449 u16 pktrate = frame->rate;
7451 /* Magic struct that slots into the radiotap header -- no reason
7452 * to build this manually element by element, we can write it much
7453 * more efficiently than we can parse it. ORDER MATTERS HERE */
7454 struct ipw_rt_hdr {
7455 struct ieee80211_radiotap_header rt_hdr;
7456 u8 rt_flags; /* radiotap packet flags */
7457 u8 rt_rate; /* rate in 500kb/s */
7458 u16 rt_channel; /* channel in mhz */
7459 u16 rt_chbitmask; /* channel bitfield */
7460 s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
7461 u8 rt_antenna; /* antenna number */
7462 } *ipw_rt;
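/* The field order in ipw_rt_hdr matters: radiotap expects the data
 * fields to follow the header in the same order as their it_present
 * bits (FLAGS, RATE, CHANNEL, DBM_ANTSIGNAL, ANTENNA), which is how
 * the members are laid out and filled in below (rt_channel and
 * rt_chbitmask together form the CHANNEL field). */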
7464 short len = le16_to_cpu(pkt->u.frame.length);
7466 /* We received data from the HW, so stop the watchdog */
7467 priv->net_dev->trans_start = jiffies;
7469 /* We only process data packets if the
7470 * interface is open */
7471 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7472 skb_tailroom(rxb->skb))) {
7473 priv->ieee->stats.rx_errors++;
7474 priv->wstats.discard.misc++;
7475 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7476 return;
7477 } else if (unlikely(!netif_running(priv->net_dev))) {
7478 priv->ieee->stats.rx_dropped++;
7479 priv->wstats.discard.misc++;
7480 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7481 return;
7484 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7485 * that now */
7486 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7487 /* FIXME: Should alloc bigger skb instead */
7488 priv->ieee->stats.rx_dropped++;
7489 priv->wstats.discard.misc++;
7490 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7491 return;
7494 /* copy the frame itself */
7495 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7496 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7498 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7499 * part of our real header, saves a little time.
7501 * No longer necessary since we fill in all our data. Purge before merging
7502 * patch officially.
7503 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7504 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7507 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7509 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7510 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7511 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7513 /* Big bitfield of all the fields we provide in radiotap */
7514 ipw_rt->rt_hdr.it_present =
7515 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7516 (1 << IEEE80211_RADIOTAP_RATE) |
7517 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7518 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7519 (1 << IEEE80211_RADIOTAP_ANTENNA));
7521 /* Zero the flags, we'll add to them as we go */
7522 ipw_rt->rt_flags = 0;
7524 /* Convert signal to DBM */
7525 ipw_rt->rt_dbmsignal = antsignal;
7527 /* Convert the channel data and set the flags */
7528 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7529 if (received_channel > 14) { /* 802.11a */
7530 ipw_rt->rt_chbitmask =
7531 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7532 } else if (antennaAndPhy & 32) { /* 802.11b */
7533 ipw_rt->rt_chbitmask =
7534 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7535 } else { /* 802.11g */
7536 ipw_rt->rt_chbitmask =
7537 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
7540 /* set the rate in multiples of 500k/s */
7541 switch (pktrate) {
7542 case IPW_TX_RATE_1MB:
7543 ipw_rt->rt_rate = 2;
7544 break;
7545 case IPW_TX_RATE_2MB:
7546 ipw_rt->rt_rate = 4;
7547 break;
7548 case IPW_TX_RATE_5MB:
7549 ipw_rt->rt_rate = 10;
7550 break;
7551 case IPW_TX_RATE_6MB:
7552 ipw_rt->rt_rate = 12;
7553 break;
7554 case IPW_TX_RATE_9MB:
7555 ipw_rt->rt_rate = 18;
7556 break;
7557 case IPW_TX_RATE_11MB:
7558 ipw_rt->rt_rate = 22;
7559 break;
7560 case IPW_TX_RATE_12MB:
7561 ipw_rt->rt_rate = 24;
7562 break;
7563 case IPW_TX_RATE_18MB:
7564 ipw_rt->rt_rate = 36;
7565 break;
7566 case IPW_TX_RATE_24MB:
7567 ipw_rt->rt_rate = 48;
7568 break;
7569 case IPW_TX_RATE_36MB:
7570 ipw_rt->rt_rate = 72;
7571 break;
7572 case IPW_TX_RATE_48MB:
7573 ipw_rt->rt_rate = 96;
7574 break;
7575 case IPW_TX_RATE_54MB:
7576 ipw_rt->rt_rate = 108;
7577 break;
7578 default:
7579 ipw_rt->rt_rate = 0;
7580 break;
7583 /* antenna number */
7584 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7586 /* set the preamble flag if we have it */
7587 if ((antennaAndPhy & 64))
7588 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7590 /* Set the size of the skb to the size of the frame */
7591 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7593 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7595 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7596 priv->ieee->stats.rx_errors++;
7597 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7598 rxb->skb = NULL;
7599 /* no LED during capture */
7602 #endif
7604 static int is_network_packet(struct ipw_priv *priv,
7605 struct ieee80211_hdr_4addr *header)
7607 /* Filter incoming packets to determine if they are targeted toward
7608 * this network, discarding packets coming from ourselves */
7609 switch (priv->ieee->iw_mode) {
7610 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7611 /* packets from our adapter are dropped (echo) */
7612 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7613 return 0;
7615 /* {broad,multi}cast packets to our BSSID go through */
7616 if (is_multicast_ether_addr(header->addr1))
7617 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7619 /* packets to our adapter go through */
7620 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7621 ETH_ALEN);
7623 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7624 /* packets from our adapter are dropped (echo) */
7625 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7626 return 0;
7628 /* {broad,multi}cast packets to our BSS go through */
7629 if (is_multicast_ether_addr(header->addr1))
7630 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7632 /* packets to our adapter go through */
7633 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7634 ETH_ALEN);
7637 return 1;
7640 #define IPW_PACKET_RETRY_TIME HZ
7642 static int is_duplicate_packet(struct ipw_priv *priv,
7643 struct ieee80211_hdr_4addr *header)
7645 u16 sc = le16_to_cpu(header->seq_ctl);
7646 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7647 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7648 u16 *last_seq, *last_frag;
7649 unsigned long *last_time;
7651 switch (priv->ieee->iw_mode) {
7652 case IW_MODE_ADHOC:
7654 struct list_head *p;
7655 struct ipw_ibss_seq *entry = NULL;
7656 u8 *mac = header->addr2;
7657 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7659 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7660 entry =
7661 list_entry(p, struct ipw_ibss_seq, list);
7662 if (!memcmp(entry->mac, mac, ETH_ALEN))
7663 break;
7665 if (p == &priv->ibss_mac_hash[index]) {
7666 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7667 if (!entry) {
7668 IPW_ERROR
7669 ("Cannot malloc new mac entry\n");
7670 return 0;
7672 memcpy(entry->mac, mac, ETH_ALEN);
7673 entry->seq_num = seq;
7674 entry->frag_num = frag;
7675 entry->packet_time = jiffies;
7676 list_add(&entry->list,
7677 &priv->ibss_mac_hash[index]);
7678 return 0;
7680 last_seq = &entry->seq_num;
7681 last_frag = &entry->frag_num;
7682 last_time = &entry->packet_time;
7683 break;
7685 case IW_MODE_INFRA:
7686 last_seq = &priv->last_seq_num;
7687 last_frag = &priv->last_frag_num;
7688 last_time = &priv->last_packet_time;
7689 break;
7690 default:
7691 return 0;
7693 if ((*last_seq == seq) &&
7694 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7695 if (*last_frag == frag)
7696 goto drop;
7697 if (*last_frag + 1 != frag)
7698 /* out-of-order fragment */
7699 goto drop;
7700 } else
7701 *last_seq = seq;
7703 *last_frag = frag;
7704 *last_time = jiffies;
7705 return 0;
7707 drop:
7708 /* This check is commented out because we have observed the card
7709 * receiving duplicate packets without the FCTL_RETRY bit set in
7710 * IBSS mode with fragmentation enabled.
7711 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7712 return 1;
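/* Duplicate detection keys on the 802.11 sequence/fragment numbers:
 * infrastructure mode needs only the single last_seq/last_frag/
 * last_time triple in ipw_priv, while IBSS mode keeps one entry per
 * peer MAC in ibss_mac_hash (hashed on the last address byte).  A
 * frame is dropped when it repeats the last sequence number within
 * IPW_PACKET_RETRY_TIME with the same, or an out-of-order, fragment
 * number. */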
7715 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7716 struct ipw_rx_mem_buffer *rxb,
7717 struct ieee80211_rx_stats *stats)
7719 struct sk_buff *skb = rxb->skb;
7720 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7721 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7722 (skb->data + IPW_RX_FRAME_SIZE);
7724 ieee80211_rx_mgt(priv->ieee, header, stats);
7726 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7727 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7728 IEEE80211_STYPE_PROBE_RESP) ||
7729 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7730 IEEE80211_STYPE_BEACON))) {
7731 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7732 ipw_add_station(priv, header->addr2);
7735 if (priv->config & CFG_NET_STATS) {
7736 IPW_DEBUG_HC("sending stat packet\n");
7738 /* Set the size of the skb to the size of the full
7739 * ipw header and 802.11 frame */
7740 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7741 IPW_RX_FRAME_SIZE);
7743 /* Advance past the ipw packet header to the 802.11 frame */
7744 skb_pull(skb, IPW_RX_FRAME_SIZE);
7746 /* Push the ieee80211_rx_stats before the 802.11 frame */
7747 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7749 skb->dev = priv->ieee->dev;
7751 /* Point raw at the ieee80211_stats */
7752 skb->mac.raw = skb->data;
7754 skb->pkt_type = PACKET_OTHERHOST;
7755 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7756 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7757 netif_rx(skb);
7758 rxb->skb = NULL;
7763 * Main entry function for receiving a packet with 802.11 headers. This
7764 * should be called whenever the FW has notified us that there is a new
7765 * skb in the receive queue.
7767 static void ipw_rx(struct ipw_priv *priv)
7769 struct ipw_rx_mem_buffer *rxb;
7770 struct ipw_rx_packet *pkt;
7771 struct ieee80211_hdr_4addr *header;
7772 u32 r, w, i;
7773 u8 network_packet;
7775 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7776 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7777 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
7779 while (i != r) {
7780 rxb = priv->rxq->queue[i];
7781 if (unlikely(rxb == NULL)) {
7782 printk(KERN_CRIT "Queue not allocated!\n");
7783 break;
7785 priv->rxq->queue[i] = NULL;
7787 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7788 IPW_RX_BUF_SIZE,
7789 PCI_DMA_FROMDEVICE);
7791 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7792 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7793 pkt->header.message_type,
7794 pkt->header.rx_seq_num, pkt->header.control_bits);
7796 switch (pkt->header.message_type) {
7797 case RX_FRAME_TYPE: /* 802.11 frame */ {
7798 struct ieee80211_rx_stats stats = {
7799 .rssi =
7800 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7801 IPW_RSSI_TO_DBM,
7802 .signal =
7803 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7804 IPW_RSSI_TO_DBM + 0x100,
7805 .noise =
7806 le16_to_cpu(pkt->u.frame.noise),
7807 .rate = pkt->u.frame.rate,
7808 .mac_time = jiffies,
7809 .received_channel =
7810 pkt->u.frame.received_channel,
7811 .freq =
7812 (pkt->u.frame.
7813 control & (1 << 0)) ?
7814 IEEE80211_24GHZ_BAND :
7815 IEEE80211_52GHZ_BAND,
7816 .len = le16_to_cpu(pkt->u.frame.length),
7819 if (stats.rssi != 0)
7820 stats.mask |= IEEE80211_STATMASK_RSSI;
7821 if (stats.signal != 0)
7822 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7823 if (stats.noise != 0)
7824 stats.mask |= IEEE80211_STATMASK_NOISE;
7825 if (stats.rate != 0)
7826 stats.mask |= IEEE80211_STATMASK_RATE;
7828 priv->rx_packets++;
7830 #ifdef CONFIG_IPW2200_MONITOR
7831 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7832 #ifdef CONFIG_IEEE80211_RADIOTAP
7833 ipw_handle_data_packet_monitor(priv,
7834 rxb,
7835 &stats);
7836 #else
7837 ipw_handle_data_packet(priv, rxb,
7838 &stats);
7839 #endif
7840 break;
7842 #endif
7844 header =
7845 (struct ieee80211_hdr_4addr *)(rxb->skb->
7846 data +
7847 IPW_RX_FRAME_SIZE);
7848 /* TODO: Check Ad-Hoc dest/source and make sure
7849 * that we are actually parsing these packets
7850 * correctly -- we should probably use the
7851 * frame control of the packet and disregard
7852 * the current iw_mode */
7854 network_packet =
7855 is_network_packet(priv, header);
7856 if (network_packet && priv->assoc_network) {
7857 priv->assoc_network->stats.rssi =
7858 stats.rssi;
7859 priv->exp_avg_rssi =
7860 exponential_average(priv->exp_avg_rssi,
7861 stats.rssi, DEPTH_RSSI);
7864 IPW_DEBUG_RX("Frame: len=%u\n",
7865 le16_to_cpu(pkt->u.frame.length));
7867 if (le16_to_cpu(pkt->u.frame.length) <
7868 ieee80211_get_hdrlen(le16_to_cpu(
7869 header->frame_ctl))) {
7870 IPW_DEBUG_DROP
7871 ("Received packet is too small. "
7872 "Dropping.\n");
7873 priv->ieee->stats.rx_errors++;
7874 priv->wstats.discard.misc++;
7875 break;
7878 switch (WLAN_FC_GET_TYPE
7879 (le16_to_cpu(header->frame_ctl))) {
7881 case IEEE80211_FTYPE_MGMT:
7882 ipw_handle_mgmt_packet(priv, rxb,
7883 &stats);
7884 break;
7886 case IEEE80211_FTYPE_CTL:
7887 break;
7889 case IEEE80211_FTYPE_DATA:
7890 if (unlikely(!network_packet ||
7891 is_duplicate_packet(priv,
7892 header)))
7894 IPW_DEBUG_DROP("Dropping: "
7895 MAC_FMT ", "
7896 MAC_FMT ", "
7897 MAC_FMT "\n",
7898 MAC_ARG(header->
7899 addr1),
7900 MAC_ARG(header->
7901 addr2),
7902 MAC_ARG(header->
7903 addr3));
7904 break;
7907 ipw_handle_data_packet(priv, rxb,
7908 &stats);
7910 break;
7912 break;
7915 case RX_HOST_NOTIFICATION_TYPE:{
7916 IPW_DEBUG_RX
7917 ("Notification: subtype=%02X flags=%02X size=%d\n",
7918 pkt->u.notification.subtype,
7919 pkt->u.notification.flags,
7920 pkt->u.notification.size);
7921 ipw_rx_notification(priv, &pkt->u.notification);
7922 break;
7925 default:
7926 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
7927 pkt->header.message_type);
7928 break;
7931 /* For now we just don't re-use anything. We can tweak this
7932 * later to try and re-use notification packets and SKBs that
7933 * fail to Rx correctly */
7934 if (rxb->skb != NULL) {
7935 dev_kfree_skb_any(rxb->skb);
7936 rxb->skb = NULL;
7939 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
7940 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
7941 list_add_tail(&rxb->list, &priv->rxq->rx_used);
7943 i = (i + 1) % RX_QUEUE_SIZE;
7946 /* Backtrack one entry */
7947 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
7949 ipw_rx_queue_restock(priv);
7952 #define DEFAULT_RTS_THRESHOLD 2304U
7953 #define MIN_RTS_THRESHOLD 1U
7954 #define MAX_RTS_THRESHOLD 2304U
7955 #define DEFAULT_BEACON_INTERVAL 100U
7956 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7957 #define DEFAULT_LONG_RETRY_LIMIT 4U
7960 * ipw_sw_reset
7961 * @option: options to control different reset behaviour
7962 * 0 = reset everything except the 'disable' module_param
7963 * 1 = reset everything and print out driver info (for probe only)
7964 * 2 = reset everything
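*
* For example, within this file option 0 is used from ipw_wx_set_mode()
* when the iw_mode changes, option 2 from ipw_wx_sw_reset(), and option 1
* is reserved for the initial probe path.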
7966 static int ipw_sw_reset(struct ipw_priv *priv, int option)
7968 int band, modulation;
7969 int old_mode = priv->ieee->iw_mode;
7971 /* Initialize module parameter values here */
7972 priv->config = 0;
7974 /* We default to disabling the LED code as right now it causes
7975 * too many systems to lock up... */
7976 if (!led)
7977 priv->config |= CFG_NO_LED;
7979 if (associate)
7980 priv->config |= CFG_ASSOCIATE;
7981 else
7982 IPW_DEBUG_INFO("Auto associate disabled.\n");
7984 if (auto_create)
7985 priv->config |= CFG_ADHOC_CREATE;
7986 else
7987 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7989 priv->config &= ~CFG_STATIC_ESSID;
7990 priv->essid_len = 0;
7991 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7993 if (disable && option) {
7994 priv->status |= STATUS_RF_KILL_SW;
7995 IPW_DEBUG_INFO("Radio disabled.\n");
7998 if (channel != 0) {
7999 priv->config |= CFG_STATIC_CHANNEL;
8000 priv->channel = channel;
8001 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8002 /* TODO: Validate that provided channel is in range */
8004 #ifdef CONFIG_IPW_QOS
8005 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8006 burst_duration_CCK, burst_duration_OFDM);
8007 #endif /* CONFIG_IPW_QOS */
8009 switch (mode) {
8010 case 1:
8011 priv->ieee->iw_mode = IW_MODE_ADHOC;
8012 priv->net_dev->type = ARPHRD_ETHER;
8014 break;
8015 #ifdef CONFIG_IPW2200_MONITOR
8016 case 2:
8017 priv->ieee->iw_mode = IW_MODE_MONITOR;
8018 #ifdef CONFIG_IEEE80211_RADIOTAP
8019 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8020 #else
8021 priv->net_dev->type = ARPHRD_IEEE80211;
8022 #endif
8023 break;
8024 #endif
8025 default:
8026 case 0:
8027 priv->net_dev->type = ARPHRD_ETHER;
8028 priv->ieee->iw_mode = IW_MODE_INFRA;
8029 break;
8032 if (hwcrypto) {
8033 priv->ieee->host_encrypt = 0;
8034 priv->ieee->host_encrypt_msdu = 0;
8035 priv->ieee->host_decrypt = 0;
8036 priv->ieee->host_mc_decrypt = 0;
8038 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8040 /* IPW2200/2915 is able to do hardware fragmentation. */
8041 priv->ieee->host_open_frag = 0;
8043 if ((priv->pci_dev->device == 0x4223) ||
8044 (priv->pci_dev->device == 0x4224)) {
8045 if (option == 1)
8046 printk(KERN_INFO DRV_NAME
8047 ": Detected Intel PRO/Wireless 2915ABG Network "
8048 "Connection\n");
8049 priv->ieee->abg_true = 1;
8050 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8051 modulation = IEEE80211_OFDM_MODULATION |
8052 IEEE80211_CCK_MODULATION;
8053 priv->adapter = IPW_2915ABG;
8054 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8055 } else {
8056 if (option == 1)
8057 printk(KERN_INFO DRV_NAME
8058 ": Detected Intel PRO/Wireless 2200BG Network "
8059 "Connection\n");
8061 priv->ieee->abg_true = 0;
8062 band = IEEE80211_24GHZ_BAND;
8063 modulation = IEEE80211_OFDM_MODULATION |
8064 IEEE80211_CCK_MODULATION;
8065 priv->adapter = IPW_2200BG;
8066 priv->ieee->mode = IEEE_G | IEEE_B;
8069 priv->ieee->freq_band = band;
8070 priv->ieee->modulation = modulation;
8072 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8074 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8075 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8077 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8078 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8079 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8081 /* If power management is turned on, default to AC mode */
8082 priv->power_mode = IPW_POWER_AC;
8083 priv->tx_power = IPW_TX_POWER_DEFAULT;
8085 return old_mode == priv->ieee->iw_mode;
8089 * This section defines the Wireless Extension handlers. It does not
8090 * define any methods of hardware manipulation and relies on the
8091 * functions defined in ipw_main to provide the HW interaction.
8093 * The exception to this is the use of the ipw_get_ordinal()
8094 * function used to poll the hardware vs. making unnecessary calls.
8098 static int ipw_wx_get_name(struct net_device *dev,
8099 struct iw_request_info *info,
8100 union iwreq_data *wrqu, char *extra)
8102 struct ipw_priv *priv = ieee80211_priv(dev);
8103 mutex_lock(&priv->mutex);
8104 if (priv->status & STATUS_RF_KILL_MASK)
8105 strcpy(wrqu->name, "radio off");
8106 else if (!(priv->status & STATUS_ASSOCIATED))
8107 strcpy(wrqu->name, "unassociated");
8108 else
8109 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8110 ipw_modes[priv->assoc_request.ieee_mode]);
8111 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8112 mutex_unlock(&priv->mutex);
8113 return 0;
8116 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8118 if (channel == 0) {
8119 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8120 priv->config &= ~CFG_STATIC_CHANNEL;
8121 IPW_DEBUG_ASSOC("Attempting to associate with new "
8122 "parameters.\n");
8123 ipw_associate(priv);
8124 return 0;
8127 priv->config |= CFG_STATIC_CHANNEL;
8129 if (priv->channel == channel) {
8130 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8131 channel);
8132 return 0;
8135 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8136 priv->channel = channel;
8138 #ifdef CONFIG_IPW2200_MONITOR
8139 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8140 int i;
8141 if (priv->status & STATUS_SCANNING) {
8142 IPW_DEBUG_SCAN("Scan abort triggered due to "
8143 "channel change.\n");
8144 ipw_abort_scan(priv);
8147 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8148 udelay(10);
8150 if (priv->status & STATUS_SCANNING)
8151 IPW_DEBUG_SCAN("Still scanning...\n");
8152 else
8153 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8154 1000 - i);
8156 return 0;
8158 #endif /* CONFIG_IPW2200_MONITOR */
8160 /* Network configuration changed -- force [re]association */
8161 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8162 if (!ipw_disassociate(priv))
8163 ipw_associate(priv);
8165 return 0;
8168 static int ipw_wx_set_freq(struct net_device *dev,
8169 struct iw_request_info *info,
8170 union iwreq_data *wrqu, char *extra)
8172 struct ipw_priv *priv = ieee80211_priv(dev);
8173 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8174 struct iw_freq *fwrq = &wrqu->freq;
8175 int ret = 0, i;
8176 u8 channel, flags;
8177 int band;
8179 if (fwrq->m == 0) {
8180 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8181 mutex_lock(&priv->mutex);
8182 ret = ipw_set_channel(priv, 0);
8183 mutex_unlock(&priv->mutex);
8184 return ret;
8186 /* if setting by freq convert to channel */
8187 if (fwrq->e == 1) {
8188 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8189 if (channel == 0)
8190 return -EINVAL;
8191 } else
8192 channel = fwrq->m;
8194 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8195 return -EINVAL;
8197 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8198 i = ieee80211_channel_to_index(priv->ieee, channel);
8199 if (i == -1)
8200 return -EINVAL;
8202 flags = (band == IEEE80211_24GHZ_BAND) ?
8203 geo->bg[i].flags : geo->a[i].flags;
8204 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8205 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8206 return -EINVAL;
8210 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8211 mutex_lock(&priv->mutex);
8212 ret = ipw_set_channel(priv, channel);
8213 mutex_unlock(&priv->mutex);
8214 return ret;
8217 static int ipw_wx_get_freq(struct net_device *dev,
8218 struct iw_request_info *info,
8219 union iwreq_data *wrqu, char *extra)
8221 struct ipw_priv *priv = ieee80211_priv(dev);
8223 wrqu->freq.e = 0;
8225 /* If we are associated, trying to associate, or have a statically
8226 * configured CHANNEL then return that; otherwise return ANY */
8227 mutex_lock(&priv->mutex);
8228 if (priv->config & CFG_STATIC_CHANNEL ||
8229 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8230 wrqu->freq.m = priv->channel;
8231 else
8232 wrqu->freq.m = 0;
8234 mutex_unlock(&priv->mutex);
8235 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8236 return 0;
8239 static int ipw_wx_set_mode(struct net_device *dev,
8240 struct iw_request_info *info,
8241 union iwreq_data *wrqu, char *extra)
8243 struct ipw_priv *priv = ieee80211_priv(dev);
8244 int err = 0;
8246 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8248 switch (wrqu->mode) {
8249 #ifdef CONFIG_IPW2200_MONITOR
8250 case IW_MODE_MONITOR:
8251 #endif
8252 case IW_MODE_ADHOC:
8253 case IW_MODE_INFRA:
8254 break;
8255 case IW_MODE_AUTO:
8256 wrqu->mode = IW_MODE_INFRA;
8257 break;
8258 default:
8259 return -EINVAL;
8261 if (wrqu->mode == priv->ieee->iw_mode)
8262 return 0;
8264 mutex_lock(&priv->mutex);
8266 ipw_sw_reset(priv, 0);
8268 #ifdef CONFIG_IPW2200_MONITOR
8269 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8270 priv->net_dev->type = ARPHRD_ETHER;
8272 if (wrqu->mode == IW_MODE_MONITOR)
8273 #ifdef CONFIG_IEEE80211_RADIOTAP
8274 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8275 #else
8276 priv->net_dev->type = ARPHRD_IEEE80211;
8277 #endif
8278 #endif /* CONFIG_IPW2200_MONITOR */
8280 /* Free the existing firmware and reset the fw_loaded
8281 * flag so ipw_load() will bring in the new firmware */
8282 free_firmware();
8284 priv->ieee->iw_mode = wrqu->mode;
8286 queue_work(priv->workqueue, &priv->adapter_restart);
8287 mutex_unlock(&priv->mutex);
8288 return err;
8291 static int ipw_wx_get_mode(struct net_device *dev,
8292 struct iw_request_info *info,
8293 union iwreq_data *wrqu, char *extra)
8295 struct ipw_priv *priv = ieee80211_priv(dev);
8296 mutex_lock(&priv->mutex);
8297 wrqu->mode = priv->ieee->iw_mode;
8298 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8299 mutex_unlock(&priv->mutex);
8300 return 0;
9303 /* Values are in microseconds */
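/* Indexed by power-save level minus one; ipw_wx_get_powermode() below
* reports these in ms, so for instance the first entries correspond to a
* 350 ms timeout with a 400 ms period. The AC and BATTERY levels are
* labelled separately in that handler. */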
8304 static const s32 timeout_duration[] = {
8305 350000,
8306 250000,
8307 75000,
8308 37000,
8309 25000,
8312 static const s32 period_duration[] = {
8313 400000,
8314 700000,
8315 1000000,
8316 1000000,
8317 1000000
8320 static int ipw_wx_get_range(struct net_device *dev,
8321 struct iw_request_info *info,
8322 union iwreq_data *wrqu, char *extra)
8324 struct ipw_priv *priv = ieee80211_priv(dev);
8325 struct iw_range *range = (struct iw_range *)extra;
8326 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8327 int i = 0, j;
8329 wrqu->data.length = sizeof(*range);
8330 memset(range, 0, sizeof(*range));
8332 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8333 range->throughput = 27 * 1000 * 1000;
8335 range->max_qual.qual = 100;
8336 /* TODO: Find real max RSSI and stick here */
8337 range->max_qual.level = 0;
8338 range->max_qual.noise = 0;
8339 range->max_qual.updated = 7; /* Updated all three */
8341 range->avg_qual.qual = 70;
8342 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8343 range->avg_qual.level = 0; /* FIXME to real average level */
8344 range->avg_qual.noise = 0;
8345 range->avg_qual.updated = 7; /* Updated all three */
8346 mutex_lock(&priv->mutex);
8347 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8349 for (i = 0; i < range->num_bitrates; i++)
8350 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8351 500000;
8353 range->max_rts = DEFAULT_RTS_THRESHOLD;
8354 range->min_frag = MIN_FRAG_THRESHOLD;
8355 range->max_frag = MAX_FRAG_THRESHOLD;
8357 range->encoding_size[0] = 5;
8358 range->encoding_size[1] = 13;
8359 range->num_encoding_sizes = 2;
8360 range->max_encoding_tokens = WEP_KEYS;
8362 /* Set the Wireless Extension versions */
8363 range->we_version_compiled = WIRELESS_EXT;
8364 range->we_version_source = 18;
8366 i = 0;
8367 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8368 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8369 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8370 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8371 continue;
8373 range->freq[i].i = geo->bg[j].channel;
8374 range->freq[i].m = geo->bg[j].freq * 100000;
8375 range->freq[i].e = 1;
8376 i++;
8380 if (priv->ieee->mode & IEEE_A) {
8381 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8382 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8383 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8384 continue;
8386 range->freq[i].i = geo->a[j].channel;
8387 range->freq[i].m = geo->a[j].freq * 100000;
8388 range->freq[i].e = 1;
8389 i++;
8393 range->num_channels = i;
8394 range->num_frequency = i;
8396 mutex_unlock(&priv->mutex);
8398 /* Event capability (kernel + driver) */
8399 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8400 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8401 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8402 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8403 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8405 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8406 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8408 IPW_DEBUG_WX("GET Range\n");
8409 return 0;
8412 static int ipw_wx_set_wap(struct net_device *dev,
8413 struct iw_request_info *info,
8414 union iwreq_data *wrqu, char *extra)
8416 struct ipw_priv *priv = ieee80211_priv(dev);
8418 static const unsigned char any[] = {
8419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8421 static const unsigned char off[] = {
8422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8425 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8426 return -EINVAL;
8427 mutex_lock(&priv->mutex);
8428 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8429 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8430 /* we disable mandatory BSSID association */
8431 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8432 priv->config &= ~CFG_STATIC_BSSID;
8433 IPW_DEBUG_ASSOC("Attempting to associate with new "
8434 "parameters.\n");
8435 ipw_associate(priv);
8436 mutex_unlock(&priv->mutex);
8437 return 0;
8440 priv->config |= CFG_STATIC_BSSID;
8441 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8442 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8443 mutex_unlock(&priv->mutex);
8444 return 0;
8447 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8448 MAC_ARG(wrqu->ap_addr.sa_data));
8450 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8452 /* Network configuration changed -- force [re]association */
8453 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8454 if (!ipw_disassociate(priv))
8455 ipw_associate(priv);
8457 mutex_unlock(&priv->mutex);
8458 return 0;
8461 static int ipw_wx_get_wap(struct net_device *dev,
8462 struct iw_request_info *info,
8463 union iwreq_data *wrqu, char *extra)
8465 struct ipw_priv *priv = ieee80211_priv(dev);
8466 /* If we are associated, trying to associate, or have a statically
8467 * configured BSSID then return that; otherwise return ANY */
8468 mutex_lock(&priv->mutex);
8469 if (priv->config & CFG_STATIC_BSSID ||
8470 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8471 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8472 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8473 } else
8474 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8476 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8477 MAC_ARG(wrqu->ap_addr.sa_data));
8478 mutex_unlock(&priv->mutex);
8479 return 0;
8482 static int ipw_wx_set_essid(struct net_device *dev,
8483 struct iw_request_info *info,
8484 union iwreq_data *wrqu, char *extra)
8486 struct ipw_priv *priv = ieee80211_priv(dev);
8487 char *essid = ""; /* ANY */
8488 int length = 0;
8489 mutex_lock(&priv->mutex);
8490 if (wrqu->essid.flags && wrqu->essid.length) {
8491 length = wrqu->essid.length - 1;
8492 essid = extra;
8494 if (length == 0) {
8495 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8496 if ((priv->config & CFG_STATIC_ESSID) &&
8497 !(priv->status & (STATUS_ASSOCIATED |
8498 STATUS_ASSOCIATING))) {
8499 IPW_DEBUG_ASSOC("Attempting to associate with new "
8500 "parameters.\n");
8501 priv->config &= ~CFG_STATIC_ESSID;
8502 ipw_associate(priv);
8504 mutex_unlock(&priv->mutex);
8505 return 0;
8508 length = min(length, IW_ESSID_MAX_SIZE);
8510 priv->config |= CFG_STATIC_ESSID;
8512 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8513 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8514 mutex_unlock(&priv->mutex);
8515 return 0;
8518 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8519 length);
8521 priv->essid_len = length;
8522 memcpy(priv->essid, essid, priv->essid_len);
8524 /* Network configuration changed -- force [re]association */
8525 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8526 if (!ipw_disassociate(priv))
8527 ipw_associate(priv);
8529 mutex_unlock(&priv->mutex);
8530 return 0;
8533 static int ipw_wx_get_essid(struct net_device *dev,
8534 struct iw_request_info *info,
8535 union iwreq_data *wrqu, char *extra)
8537 struct ipw_priv *priv = ieee80211_priv(dev);
8539 /* If we are associated, trying to associate, or have a statically
8540 * configured ESSID then return that; otherwise return ANY */
8541 mutex_lock(&priv->mutex);
8542 if (priv->config & CFG_STATIC_ESSID ||
8543 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8544 IPW_DEBUG_WX("Getting essid: '%s'\n",
8545 escape_essid(priv->essid, priv->essid_len));
8546 memcpy(extra, priv->essid, priv->essid_len);
8547 wrqu->essid.length = priv->essid_len;
8548 wrqu->essid.flags = 1; /* active */
8549 } else {
8550 IPW_DEBUG_WX("Getting essid: ANY\n");
8551 wrqu->essid.length = 0;
8552 wrqu->essid.flags = 0; /* active */
8554 mutex_unlock(&priv->mutex);
8555 return 0;
8558 static int ipw_wx_set_nick(struct net_device *dev,
8559 struct iw_request_info *info,
8560 union iwreq_data *wrqu, char *extra)
8562 struct ipw_priv *priv = ieee80211_priv(dev);
8564 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8565 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8566 return -E2BIG;
8567 mutex_lock(&priv->mutex);
8568 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8569 memset(priv->nick, 0, sizeof(priv->nick));
8570 memcpy(priv->nick, extra, wrqu->data.length);
8571 IPW_DEBUG_TRACE("<<\n");
8572 mutex_unlock(&priv->mutex);
8573 return 0;
8577 static int ipw_wx_get_nick(struct net_device *dev,
8578 struct iw_request_info *info,
8579 union iwreq_data *wrqu, char *extra)
8581 struct ipw_priv *priv = ieee80211_priv(dev);
8582 IPW_DEBUG_WX("Getting nick\n");
8583 mutex_lock(&priv->mutex);
8584 wrqu->data.length = strlen(priv->nick) + 1;
8585 memcpy(extra, priv->nick, wrqu->data.length);
8586 wrqu->data.flags = 1; /* active */
8587 mutex_unlock(&priv->mutex);
8588 return 0;
8591 static int ipw_wx_set_sens(struct net_device *dev,
8592 struct iw_request_info *info,
8593 union iwreq_data *wrqu, char *extra)
8595 struct ipw_priv *priv = ieee80211_priv(dev);
8596 int err = 0;
8598 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8599 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8600 mutex_lock(&priv->mutex);
8602 if (wrqu->sens.fixed == 0)
8604 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8605 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8606 goto out;
8608 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8609 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8610 err = -EINVAL;
8611 goto out;
8614 priv->roaming_threshold = wrqu->sens.value;
8615 priv->disassociate_threshold = 3 * wrqu->sens.value;
8616 out:
8617 mutex_unlock(&priv->mutex);
8618 return err;
8621 static int ipw_wx_get_sens(struct net_device *dev,
8622 struct iw_request_info *info,
8623 union iwreq_data *wrqu, char *extra)
8625 struct ipw_priv *priv = ieee80211_priv(dev);
8626 mutex_lock(&priv->mutex);
8627 wrqu->sens.fixed = 1;
8628 wrqu->sens.value = priv->roaming_threshold;
8629 mutex_unlock(&priv->mutex);
8631 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
8632 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8634 return 0;
8637 static int ipw_wx_set_rate(struct net_device *dev,
8638 struct iw_request_info *info,
8639 union iwreq_data *wrqu, char *extra)
8641 /* TODO: We should use semaphores or locks for access to priv */
8642 struct ipw_priv *priv = ieee80211_priv(dev);
8643 u32 target_rate = wrqu->bitrate.value;
8644 u32 fixed, mask;
8646 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8647 /* value = X, fixed = 1 means only rate X */
8648 /* value = X, fixed = 0 means all rates lower than or equal to X */
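/* For example, "iwconfig <ifname> rate 5.5M fixed" typically arrives here
* as value = 5500000 with fixed = 1 and selects only
* IEEE80211_CCK_RATE_5MB_MASK; with fixed = 0 the 1, 2 and 5.5 Mb/s CCK
* masks are all set before the reassociation is applied. */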
8650 if (target_rate == -1) {
8651 fixed = 0;
8652 mask = IEEE80211_DEFAULT_RATES_MASK;
8653 /* Now we should reassociate */
8654 goto apply;
8657 mask = 0;
8658 fixed = wrqu->bitrate.fixed;
8660 if (target_rate == 1000000 || !fixed)
8661 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8662 if (target_rate == 1000000)
8663 goto apply;
8665 if (target_rate == 2000000 || !fixed)
8666 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8667 if (target_rate == 2000000)
8668 goto apply;
8670 if (target_rate == 5500000 || !fixed)
8671 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8672 if (target_rate == 5500000)
8673 goto apply;
8675 if (target_rate == 6000000 || !fixed)
8676 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8677 if (target_rate == 6000000)
8678 goto apply;
8680 if (target_rate == 9000000 || !fixed)
8681 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8682 if (target_rate == 9000000)
8683 goto apply;
8685 if (target_rate == 11000000 || !fixed)
8686 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8687 if (target_rate == 11000000)
8688 goto apply;
8690 if (target_rate == 12000000 || !fixed)
8691 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8692 if (target_rate == 12000000)
8693 goto apply;
8695 if (target_rate == 18000000 || !fixed)
8696 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8697 if (target_rate == 18000000)
8698 goto apply;
8700 if (target_rate == 24000000 || !fixed)
8701 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8702 if (target_rate == 24000000)
8703 goto apply;
8705 if (target_rate == 36000000 || !fixed)
8706 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8707 if (target_rate == 36000000)
8708 goto apply;
8710 if (target_rate == 48000000 || !fixed)
8711 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8712 if (target_rate == 48000000)
8713 goto apply;
8715 if (target_rate == 54000000 || !fixed)
8716 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8717 if (target_rate == 54000000)
8718 goto apply;
8720 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8721 return -EINVAL;
8723 apply:
8724 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8725 mask, fixed ? "fixed" : "sub-rates");
8726 mutex_lock(&priv->mutex);
8727 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8728 priv->config &= ~CFG_FIXED_RATE;
8729 ipw_set_fixed_rate(priv, priv->ieee->mode);
8730 } else
8731 priv->config |= CFG_FIXED_RATE;
8733 if (priv->rates_mask == mask) {
8734 IPW_DEBUG_WX("Mask set to current mask.\n");
8735 mutex_unlock(&priv->mutex);
8736 return 0;
8739 priv->rates_mask = mask;
8741 /* Network configuration changed -- force [re]association */
8742 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8743 if (!ipw_disassociate(priv))
8744 ipw_associate(priv);
8746 mutex_unlock(&priv->mutex);
8747 return 0;
8750 static int ipw_wx_get_rate(struct net_device *dev,
8751 struct iw_request_info *info,
8752 union iwreq_data *wrqu, char *extra)
8754 struct ipw_priv *priv = ieee80211_priv(dev);
8755 mutex_lock(&priv->mutex);
8756 wrqu->bitrate.value = priv->last_rate;
8757 mutex_unlock(&priv->mutex);
8758 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8759 return 0;
8762 static int ipw_wx_set_rts(struct net_device *dev,
8763 struct iw_request_info *info,
8764 union iwreq_data *wrqu, char *extra)
8766 struct ipw_priv *priv = ieee80211_priv(dev);
8767 mutex_lock(&priv->mutex);
8768 if (wrqu->rts.disabled)
8769 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8770 else {
8771 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8772 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8773 mutex_unlock(&priv->mutex);
8774 return -EINVAL;
8776 priv->rts_threshold = wrqu->rts.value;
8779 ipw_send_rts_threshold(priv, priv->rts_threshold);
8780 mutex_unlock(&priv->mutex);
8781 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8782 return 0;
8785 static int ipw_wx_get_rts(struct net_device *dev,
8786 struct iw_request_info *info,
8787 union iwreq_data *wrqu, char *extra)
8789 struct ipw_priv *priv = ieee80211_priv(dev);
8790 mutex_lock(&priv->mutex);
8791 wrqu->rts.value = priv->rts_threshold;
8792 wrqu->rts.fixed = 0; /* no auto select */
8793 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8794 mutex_unlock(&priv->mutex);
8795 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8796 return 0;
8799 static int ipw_wx_set_txpow(struct net_device *dev,
8800 struct iw_request_info *info,
8801 union iwreq_data *wrqu, char *extra)
8803 struct ipw_priv *priv = ieee80211_priv(dev);
8804 int err = 0;
8806 mutex_lock(&priv->mutex);
8807 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8808 err = -EINPROGRESS;
8809 goto out;
8812 if (!wrqu->power.fixed)
8813 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8815 if (wrqu->power.flags != IW_TXPOW_DBM) {
8816 err = -EINVAL;
8817 goto out;
8820 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8821 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8822 err = -EINVAL;
8823 goto out;
8826 priv->tx_power = wrqu->power.value;
8827 err = ipw_set_tx_power(priv);
8828 out:
8829 mutex_unlock(&priv->mutex);
8830 return err;
8833 static int ipw_wx_get_txpow(struct net_device *dev,
8834 struct iw_request_info *info,
8835 union iwreq_data *wrqu, char *extra)
8837 struct ipw_priv *priv = ieee80211_priv(dev);
8838 mutex_lock(&priv->mutex);
8839 wrqu->power.value = priv->tx_power;
8840 wrqu->power.fixed = 1;
8841 wrqu->power.flags = IW_TXPOW_DBM;
8842 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8843 mutex_unlock(&priv->mutex);
8845 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8846 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8848 return 0;
8851 static int ipw_wx_set_frag(struct net_device *dev,
8852 struct iw_request_info *info,
8853 union iwreq_data *wrqu, char *extra)
8855 struct ipw_priv *priv = ieee80211_priv(dev);
8856 mutex_lock(&priv->mutex);
8857 if (wrqu->frag.disabled)
8858 priv->ieee->fts = DEFAULT_FTS;
8859 else {
8860 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8861 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8862 mutex_unlock(&priv->mutex);
8863 return -EINVAL;
8866 priv->ieee->fts = wrqu->frag.value & ~0x1;
8869 ipw_send_frag_threshold(priv, wrqu->frag.value);
8870 mutex_unlock(&priv->mutex);
8871 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8872 return 0;
8875 static int ipw_wx_get_frag(struct net_device *dev,
8876 struct iw_request_info *info,
8877 union iwreq_data *wrqu, char *extra)
8879 struct ipw_priv *priv = ieee80211_priv(dev);
8880 mutex_lock(&priv->mutex);
8881 wrqu->frag.value = priv->ieee->fts;
8882 wrqu->frag.fixed = 0; /* no auto select */
8883 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8884 mutex_unlock(&priv->mutex);
8885 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8887 return 0;
8890 static int ipw_wx_set_retry(struct net_device *dev,
8891 struct iw_request_info *info,
8892 union iwreq_data *wrqu, char *extra)
8894 struct ipw_priv *priv = ieee80211_priv(dev);
8896 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8897 return -EINVAL;
8899 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8900 return 0;
8902 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8903 return -EINVAL;
8905 mutex_lock(&priv->mutex);
8906 if (wrqu->retry.flags & IW_RETRY_MIN)
8907 priv->short_retry_limit = (u8) wrqu->retry.value;
8908 else if (wrqu->retry.flags & IW_RETRY_MAX)
8909 priv->long_retry_limit = (u8) wrqu->retry.value;
8910 else {
8911 priv->short_retry_limit = (u8) wrqu->retry.value;
8912 priv->long_retry_limit = (u8) wrqu->retry.value;
8915 ipw_send_retry_limit(priv, priv->short_retry_limit,
8916 priv->long_retry_limit);
8917 mutex_unlock(&priv->mutex);
8918 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8919 priv->short_retry_limit, priv->long_retry_limit);
8920 return 0;
8923 static int ipw_wx_get_retry(struct net_device *dev,
8924 struct iw_request_info *info,
8925 union iwreq_data *wrqu, char *extra)
8927 struct ipw_priv *priv = ieee80211_priv(dev);
8929 mutex_lock(&priv->mutex);
8930 wrqu->retry.disabled = 0;
8932 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8933 mutex_unlock(&priv->mutex);
8934 return -EINVAL;
8937 if (wrqu->retry.flags & IW_RETRY_MAX) {
8938 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8939 wrqu->retry.value = priv->long_retry_limit;
8940 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8941 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8942 wrqu->retry.value = priv->short_retry_limit;
8943 } else {
8944 wrqu->retry.flags = IW_RETRY_LIMIT;
8945 wrqu->retry.value = priv->short_retry_limit;
8947 mutex_unlock(&priv->mutex);
8949 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8951 return 0;
8954 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8955 int essid_len)
8957 struct ipw_scan_request_ext scan;
8958 int err = 0, scan_type;
8960 if (!(priv->status & STATUS_INIT) ||
8961 (priv->status & STATUS_EXIT_PENDING))
8962 return 0;
8964 mutex_lock(&priv->mutex);
8966 if (priv->status & STATUS_RF_KILL_MASK) {
8967 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
8968 priv->status |= STATUS_SCAN_PENDING;
8969 goto done;
8972 IPW_DEBUG_HC("starting request direct scan!\n");
8974 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8975 /* We should not sleep here; otherwise we will block most
8976 * of the system (for instance, we hold rtnl_lock when we
8977 * get here).
8979 err = -EAGAIN;
8980 goto done;
8982 memset(&scan, 0, sizeof(scan));
8984 if (priv->config & CFG_SPEED_SCAN)
8985 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8986 cpu_to_le16(30);
8987 else
8988 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8989 cpu_to_le16(20);
8991 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
8992 cpu_to_le16(20);
8993 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
8994 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
8996 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
8998 err = ipw_send_ssid(priv, essid, essid_len);
8999 if (err) {
9000 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9001 goto done;
9003 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9005 ipw_add_scan_channels(priv, &scan, scan_type);
9007 err = ipw_send_scan_request_ext(priv, &scan);
9008 if (err) {
9009 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9010 goto done;
9013 priv->status |= STATUS_SCANNING;
9015 done:
9016 mutex_unlock(&priv->mutex);
9017 return err;
9020 static int ipw_wx_set_scan(struct net_device *dev,
9021 struct iw_request_info *info,
9022 union iwreq_data *wrqu, char *extra)
9024 struct ipw_priv *priv = ieee80211_priv(dev);
9025 struct iw_scan_req *req = NULL;
9026 if (wrqu->data.length
9027 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9028 req = (struct iw_scan_req *)extra;
9029 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9030 ipw_request_direct_scan(priv, req->essid,
9031 req->essid_len);
9032 return 0;
9036 IPW_DEBUG_WX("Start scan\n");
9038 queue_work(priv->workqueue, &priv->request_scan);
9040 return 0;
9043 static int ipw_wx_get_scan(struct net_device *dev,
9044 struct iw_request_info *info,
9045 union iwreq_data *wrqu, char *extra)
9047 struct ipw_priv *priv = ieee80211_priv(dev);
9048 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9051 static int ipw_wx_set_encode(struct net_device *dev,
9052 struct iw_request_info *info,
9053 union iwreq_data *wrqu, char *key)
9055 struct ipw_priv *priv = ieee80211_priv(dev);
9056 int ret;
9057 u32 cap = priv->capability;
9059 mutex_lock(&priv->mutex);
9060 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9062 /* In IBSS mode, we need to notify the firmware to update
9063 * the beacon info after we changed the capability. */
9064 if (cap != priv->capability &&
9065 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9066 priv->status & STATUS_ASSOCIATED)
9067 ipw_disassociate(priv);
9069 mutex_unlock(&priv->mutex);
9070 return ret;
9073 static int ipw_wx_get_encode(struct net_device *dev,
9074 struct iw_request_info *info,
9075 union iwreq_data *wrqu, char *key)
9077 struct ipw_priv *priv = ieee80211_priv(dev);
9078 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9081 static int ipw_wx_set_power(struct net_device *dev,
9082 struct iw_request_info *info,
9083 union iwreq_data *wrqu, char *extra)
9085 struct ipw_priv *priv = ieee80211_priv(dev);
9086 int err;
9087 mutex_lock(&priv->mutex);
9088 if (wrqu->power.disabled) {
9089 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9090 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9091 if (err) {
9092 IPW_DEBUG_WX("failed setting power mode.\n");
9093 mutex_unlock(&priv->mutex);
9094 return err;
9096 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9097 mutex_unlock(&priv->mutex);
9098 return 0;
9101 switch (wrqu->power.flags & IW_POWER_MODE) {
9102 case IW_POWER_ON: /* If not specified */
9103 case IW_POWER_MODE: /* If set all mask */
9104 case IW_POWER_ALL_R: /* If explicitly stating all */
9105 break;
9106 default: /* Otherwise we don't support it */
9107 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9108 wrqu->power.flags);
9109 mutex_unlock(&priv->mutex);
9110 return -EOPNOTSUPP;
9113 /* If the user hasn't specified a power management mode yet, default
9114 * to BATTERY */
9115 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9116 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9117 else
9118 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9119 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9120 if (err) {
9121 IPW_DEBUG_WX("failed setting power mode.\n");
9122 mutex_unlock(&priv->mutex);
9123 return err;
9126 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9127 mutex_unlock(&priv->mutex);
9128 return 0;
9131 static int ipw_wx_get_power(struct net_device *dev,
9132 struct iw_request_info *info,
9133 union iwreq_data *wrqu, char *extra)
9135 struct ipw_priv *priv = ieee80211_priv(dev);
9136 mutex_lock(&priv->mutex);
9137 if (!(priv->power_mode & IPW_POWER_ENABLED))
9138 wrqu->power.disabled = 1;
9139 else
9140 wrqu->power.disabled = 0;
9142 mutex_unlock(&priv->mutex);
9143 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9145 return 0;
9148 static int ipw_wx_set_powermode(struct net_device *dev,
9149 struct iw_request_info *info,
9150 union iwreq_data *wrqu, char *extra)
9152 struct ipw_priv *priv = ieee80211_priv(dev);
9153 int mode = *(int *)extra;
9154 int err;
9155 mutex_lock(&priv->mutex);
9156 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9157 mode = IPW_POWER_AC;
9158 priv->power_mode = mode;
9159 } else {
9160 priv->power_mode = IPW_POWER_ENABLED | mode;
9163 if (priv->power_mode != mode) {
9164 err = ipw_send_power_mode(priv, mode);
9166 if (err) {
9167 IPW_DEBUG_WX("failed setting power mode.\n");
9168 mutex_unlock(&priv->mutex);
9169 return err;
9172 mutex_unlock(&priv->mutex);
9173 return 0;
9176 #define MAX_WX_STRING 80
9177 static int ipw_wx_get_powermode(struct net_device *dev,
9178 struct iw_request_info *info,
9179 union iwreq_data *wrqu, char *extra)
9181 struct ipw_priv *priv = ieee80211_priv(dev);
9182 int level = IPW_POWER_LEVEL(priv->power_mode);
9183 char *p = extra;
9185 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9187 switch (level) {
9188 case IPW_POWER_AC:
9189 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9190 break;
9191 case IPW_POWER_BATTERY:
9192 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9193 break;
9194 default:
9195 p += snprintf(p, MAX_WX_STRING - (p - extra),
9196 "(Timeout %dms, Period %dms)",
9197 timeout_duration[level - 1] / 1000,
9198 period_duration[level - 1] / 1000);
9201 if (!(priv->power_mode & IPW_POWER_ENABLED))
9202 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9204 wrqu->data.length = p - extra + 1;
9206 return 0;
9209 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9210 struct iw_request_info *info,
9211 union iwreq_data *wrqu, char *extra)
9213 struct ipw_priv *priv = ieee80211_priv(dev);
9214 int mode = *(int *)extra;
9215 u8 band = 0, modulation = 0;
9217 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9218 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9219 return -EINVAL;
9221 mutex_lock(&priv->mutex);
9222 if (priv->adapter == IPW_2915ABG) {
9223 priv->ieee->abg_true = 1;
9224 if (mode & IEEE_A) {
9225 band |= IEEE80211_52GHZ_BAND;
9226 modulation |= IEEE80211_OFDM_MODULATION;
9227 } else
9228 priv->ieee->abg_true = 0;
9229 } else {
9230 if (mode & IEEE_A) {
9231 IPW_WARNING("Attempt to set 2200BG into "
9232 "802.11a mode\n");
9233 mutex_unlock(&priv->mutex);
9234 return -EINVAL;
9237 priv->ieee->abg_true = 0;
9240 if (mode & IEEE_B) {
9241 band |= IEEE80211_24GHZ_BAND;
9242 modulation |= IEEE80211_CCK_MODULATION;
9243 } else
9244 priv->ieee->abg_true = 0;
9246 if (mode & IEEE_G) {
9247 band |= IEEE80211_24GHZ_BAND;
9248 modulation |= IEEE80211_OFDM_MODULATION;
9249 } else
9250 priv->ieee->abg_true = 0;
9252 priv->ieee->mode = mode;
9253 priv->ieee->freq_band = band;
9254 priv->ieee->modulation = modulation;
9255 init_supported_rates(priv, &priv->rates);
9257 /* Network configuration changed -- force [re]association */
9258 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9259 if (!ipw_disassociate(priv)) {
9260 ipw_send_supported_rates(priv, &priv->rates);
9261 ipw_associate(priv);
9264 /* Update the band LEDs */
9265 ipw_led_band_on(priv);
9267 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9268 mode & IEEE_A ? 'a' : '.',
9269 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9270 mutex_unlock(&priv->mutex);
9271 return 0;
9274 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9275 struct iw_request_info *info,
9276 union iwreq_data *wrqu, char *extra)
9278 struct ipw_priv *priv = ieee80211_priv(dev);
9279 mutex_lock(&priv->mutex);
9280 switch (priv->ieee->mode) {
9281 case IEEE_A:
9282 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9283 break;
9284 case IEEE_B:
9285 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9286 break;
9287 case IEEE_A | IEEE_B:
9288 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9289 break;
9290 case IEEE_G:
9291 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9292 break;
9293 case IEEE_A | IEEE_G:
9294 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9295 break;
9296 case IEEE_B | IEEE_G:
9297 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9298 break;
9299 case IEEE_A | IEEE_B | IEEE_G:
9300 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9301 break;
9302 default:
9303 strncpy(extra, "unknown", MAX_WX_STRING);
9304 break;
9307 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9309 wrqu->data.length = strlen(extra) + 1;
9310 mutex_unlock(&priv->mutex);
9312 return 0;
9315 static int ipw_wx_set_preamble(struct net_device *dev,
9316 struct iw_request_info *info,
9317 union iwreq_data *wrqu, char *extra)
9319 struct ipw_priv *priv = ieee80211_priv(dev);
9320 int mode = *(int *)extra;
9321 mutex_lock(&priv->mutex);
9322 /* Switching from SHORT -> LONG requires a disassociation */
9323 if (mode == 1) {
9324 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9325 priv->config |= CFG_PREAMBLE_LONG;
9327 /* Network configuration changed -- force [re]association */
9328 IPW_DEBUG_ASSOC
9329 ("[re]association triggered due to preamble change.\n");
9330 if (!ipw_disassociate(priv))
9331 ipw_associate(priv);
9333 goto done;
9336 if (mode == 0) {
9337 priv->config &= ~CFG_PREAMBLE_LONG;
9338 goto done;
9340 mutex_unlock(&priv->mutex);
9341 return -EINVAL;
9343 done:
9344 mutex_unlock(&priv->mutex);
9345 return 0;
9348 static int ipw_wx_get_preamble(struct net_device *dev,
9349 struct iw_request_info *info,
9350 union iwreq_data *wrqu, char *extra)
9352 struct ipw_priv *priv = ieee80211_priv(dev);
9353 mutex_lock(&priv->mutex);
9354 if (priv->config & CFG_PREAMBLE_LONG)
9355 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9356 else
9357 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9358 mutex_unlock(&priv->mutex);
9359 return 0;
9362 #ifdef CONFIG_IPW2200_MONITOR
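/* Private "monitor" ioctl: the first int argument enables or disables
* monitor mode and the second selects the channel, so something like
* "iwpriv <ifname> monitor 1 6" should start sniffing on channel 6 and
* "iwpriv <ifname> monitor 0 0" should restore ethernet framing. */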
9363 static int ipw_wx_set_monitor(struct net_device *dev,
9364 struct iw_request_info *info,
9365 union iwreq_data *wrqu, char *extra)
9367 struct ipw_priv *priv = ieee80211_priv(dev);
9368 int *parms = (int *)extra;
9369 int enable = (parms[0] > 0);
9370 mutex_lock(&priv->mutex);
9371 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9372 if (enable) {
9373 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9374 #ifdef CONFIG_IEEE80211_RADIOTAP
9375 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9376 #else
9377 priv->net_dev->type = ARPHRD_IEEE80211;
9378 #endif
9379 queue_work(priv->workqueue, &priv->adapter_restart);
9382 ipw_set_channel(priv, parms[1]);
9383 } else {
9384 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9385 mutex_unlock(&priv->mutex);
9386 return 0;
9388 priv->net_dev->type = ARPHRD_ETHER;
9389 queue_work(priv->workqueue, &priv->adapter_restart);
9391 mutex_unlock(&priv->mutex);
9392 return 0;
9395 #endif /* CONFIG_IPW2200_MONITOR */
9397 static int ipw_wx_reset(struct net_device *dev,
9398 struct iw_request_info *info,
9399 union iwreq_data *wrqu, char *extra)
9401 struct ipw_priv *priv = ieee80211_priv(dev);
9402 IPW_DEBUG_WX("RESET\n");
9403 queue_work(priv->workqueue, &priv->adapter_restart);
9404 return 0;
9407 static int ipw_wx_sw_reset(struct net_device *dev,
9408 struct iw_request_info *info,
9409 union iwreq_data *wrqu, char *extra)
9411 struct ipw_priv *priv = ieee80211_priv(dev);
9412 union iwreq_data wrqu_sec = {
9413 .encoding = {
9414 .flags = IW_ENCODE_DISABLED,
9417 int ret;
9419 IPW_DEBUG_WX("SW_RESET\n");
9421 mutex_lock(&priv->mutex);
9423 ret = ipw_sw_reset(priv, 2);
9424 if (!ret) {
9425 free_firmware();
9426 ipw_adapter_restart(priv);
9429 /* The SW reset bit might have been toggled on by the 'disable'
9430 * module parameter, so take appropriate action */
9431 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9433 mutex_unlock(&priv->mutex);
9434 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9435 mutex_lock(&priv->mutex);
9437 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9438 /* Configuration likely changed -- force [re]association */
9439 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9440 "reset.\n");
9441 if (!ipw_disassociate(priv))
9442 ipw_associate(priv);
9445 mutex_unlock(&priv->mutex);
9447 return 0;
9450 /* Rebase the WE IOCTLs to zero for the handler array */
9451 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
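/* e.g. IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT], so
* the table below is indexed from zero regardless of the ioctl numbering. */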
9452 static iw_handler ipw_wx_handlers[] = {
9453 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9454 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9455 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9456 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9457 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9458 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9459 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9460 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9461 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9462 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9463 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9464 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9465 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9466 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9467 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9468 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9469 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9470 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9471 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9472 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9473 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9474 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9475 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9476 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9477 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9478 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9479 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9480 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9481 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9482 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9483 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9484 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9485 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9486 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9487 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9488 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9489 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9490 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9491 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9492 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9493 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9496 enum {
9497 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9498 IPW_PRIV_GET_POWER,
9499 IPW_PRIV_SET_MODE,
9500 IPW_PRIV_GET_MODE,
9501 IPW_PRIV_SET_PREAMBLE,
9502 IPW_PRIV_GET_PREAMBLE,
9503 IPW_PRIV_RESET,
9504 IPW_PRIV_SW_RESET,
9505 #ifdef CONFIG_IPW2200_MONITOR
9506 IPW_PRIV_SET_MONITOR,
9507 #endif
9510 static struct iw_priv_args ipw_priv_args[] = {
9512 .cmd = IPW_PRIV_SET_POWER,
9513 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9514 .name = "set_power"},
9516 .cmd = IPW_PRIV_GET_POWER,
9517 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9518 .name = "get_power"},
9520 .cmd = IPW_PRIV_SET_MODE,
9521 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9522 .name = "set_mode"},
9524 .cmd = IPW_PRIV_GET_MODE,
9525 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9526 .name = "get_mode"},
9528 .cmd = IPW_PRIV_SET_PREAMBLE,
9529 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9530 .name = "set_preamble"},
9532 .cmd = IPW_PRIV_GET_PREAMBLE,
9533 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9534 .name = "get_preamble"},
9536 IPW_PRIV_RESET,
9537 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9539 IPW_PRIV_SW_RESET,
9540 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9541 #ifdef CONFIG_IPW2200_MONITOR
9543 IPW_PRIV_SET_MONITOR,
9544 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9545 #endif /* CONFIG_IPW2200_MONITOR */
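/* The handlers below line up, in order, with the IPW_PRIV_* commands and
* names above; for instance "iwpriv <ifname> set_mode 7" (a|b|g, using the
* same encoding ipw_wx_get_wireless_mode reports) lands in
* ipw_wx_set_wireless_mode(), and "iwpriv <ifname> sw_reset" in
* ipw_wx_sw_reset(). */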
9548 static iw_handler ipw_priv_handler[] = {
9549 ipw_wx_set_powermode,
9550 ipw_wx_get_powermode,
9551 ipw_wx_set_wireless_mode,
9552 ipw_wx_get_wireless_mode,
9553 ipw_wx_set_preamble,
9554 ipw_wx_get_preamble,
9555 ipw_wx_reset,
9556 ipw_wx_sw_reset,
9557 #ifdef CONFIG_IPW2200_MONITOR
9558 ipw_wx_set_monitor,
9559 #endif
9562 static struct iw_handler_def ipw_wx_handler_def = {
9563 .standard = ipw_wx_handlers,
9564 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9565 .num_private = ARRAY_SIZE(ipw_priv_handler),
9566 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9567 .private = ipw_priv_handler,
9568 .private_args = ipw_priv_args,
9569 .get_wireless_stats = ipw_get_wireless_stats,
9573 * Get wireless statistics.
9574 * Called by /proc/net/wireless
9575 * Also called by SIOCGIWSTATS
9577 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9579 struct ipw_priv *priv = ieee80211_priv(dev);
9580 struct iw_statistics *wstats;
9582 wstats = &priv->wstats;
9584 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9585 * netdev->get_wireless_stats seems to be called before fw is
9586 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9587 * and associated; if not associated, the values are all meaningless
9588 * anyway, so set them all to NULL and INVALID */
9589 if (!(priv->status & STATUS_ASSOCIATED)) {
9590 wstats->miss.beacon = 0;
9591 wstats->discard.retries = 0;
9592 wstats->qual.qual = 0;
9593 wstats->qual.level = 0;
9594 wstats->qual.noise = 0;
9595 wstats->qual.updated = 7;
9596 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9597 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9598 return wstats;
9601 wstats->qual.qual = priv->quality;
9602 wstats->qual.level = priv->exp_avg_rssi;
9603 wstats->qual.noise = priv->exp_avg_noise;
9604 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9605 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9607 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9608 wstats->discard.retries = priv->last_tx_failures;
9609 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9611 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9612 goto fail_get_ordinal;
9613 wstats->discard.retries += tx_retry; */
9615 return wstats;
9618 /* net device stuff */
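/* init_sys_config() seeds the firmware system configuration, including the
* antenna_diversity field driven by the "antenna" variable; out-of-range
* values silently fall back to CFG_SYS_ANTENNA_BOTH. Assuming the value is
* exported as a module parameter under that name (as this patch intends),
* something like "modprobe ipw2200 antenna=1" would request a specific
* antenna instead of diversity, with the value-to-antenna mapping taken
* from the CFG_SYS_ANTENNA_* constants in ipw2200.h. */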
9620 static void init_sys_config(struct ipw_sys_config *sys_config)
9622 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9623 sys_config->bt_coexistence = 0;
9624 sys_config->answer_broadcast_ssid_probe = 0;
9625 sys_config->accept_all_data_frames = 0;
9626 sys_config->accept_non_directed_frames = 1;
9627 sys_config->exclude_unicast_unencrypted = 0;
9628 sys_config->disable_unicast_decryption = 1;
9629 sys_config->exclude_multicast_unencrypted = 0;
9630 sys_config->disable_multicast_decryption = 1;
9631 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
9632 antenna = CFG_SYS_ANTENNA_BOTH;
9633 sys_config->antenna_diversity = antenna;
9634 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9635 sys_config->dot11g_auto_detection = 0;
9636 sys_config->enable_cts_to_self = 0;
9637 sys_config->bt_coexist_collision_thr = 0;
9638 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
9639 sys_config->silence_threshold = 0x1e;
9642 static int ipw_net_open(struct net_device *dev)
9644 struct ipw_priv *priv = ieee80211_priv(dev);
9645 IPW_DEBUG_INFO("dev->open\n");
9646 /* we should be verifying the device is ready to be opened */
9647 mutex_lock(&priv->mutex);
9648 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9649 (priv->status & STATUS_ASSOCIATED))
9650 netif_start_queue(dev);
9651 mutex_unlock(&priv->mutex);
9652 return 0;
9655 static int ipw_net_stop(struct net_device *dev)
9657 IPW_DEBUG_INFO("dev->close\n");
9658 netif_stop_queue(dev);
9659 return 0;
9663 TODO:
9665 modify to send one TFD per fragment instead of using chunking; otherwise
9666 we need to heavily modify the ieee80211_skb_to_txb.
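The chunking referred to above: up to NUM_TFD_CHUNKS - 2 fragments are
mapped straight into the TFD, and any remaining fragments are copied into
one freshly allocated skb so the frame still fits in a single descriptor;
a txb with more fragments than chunk slots therefore has its tail
fragments coalesced.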
9669 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9670 int pri)
9672 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
9673 txb->fragments[0]->data;
9674 int i = 0;
9675 struct tfd_frame *tfd;
9676 #ifdef CONFIG_IPW_QOS
9677 int tx_id = ipw_get_tx_queue_number(priv, pri);
9678 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9679 #else
9680 struct clx2_tx_queue *txq = &priv->txq[0];
9681 #endif
9682 struct clx2_queue *q = &txq->q;
9683 u8 id, hdr_len, unicast;
9684 u16 remaining_bytes;
9685 int fc;
9687 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
9688 switch (priv->ieee->iw_mode) {
9689 case IW_MODE_ADHOC:
9690 unicast = !is_multicast_ether_addr(hdr->addr1);
9691 id = ipw_find_station(priv, hdr->addr1);
9692 if (id == IPW_INVALID_STATION) {
9693 id = ipw_add_station(priv, hdr->addr1);
9694 if (id == IPW_INVALID_STATION) {
9695 IPW_WARNING("Attempt to send data to "
9696 "invalid cell: " MAC_FMT "\n",
9697 MAC_ARG(hdr->addr1));
9698 goto drop;
9701 break;
9703 case IW_MODE_INFRA:
9704 default:
9705 unicast = !is_multicast_ether_addr(hdr->addr3);
9706 id = 0;
9707 break;
9710 tfd = &txq->bd[q->first_empty];
9711 txq->txb[q->first_empty] = txb;
9712 memset(tfd, 0, sizeof(*tfd));
9713 tfd->u.data.station_number = id;
9715 tfd->control_flags.message_type = TX_FRAME_TYPE;
9716 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9718 tfd->u.data.cmd_id = DINO_CMD_TX;
9719 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9720 remaining_bytes = txb->payload_size;
9722 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9723 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9724 else
9725 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9727 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9728 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9730 fc = le16_to_cpu(hdr->frame_ctl);
9731 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9733 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9735 if (likely(unicast))
9736 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9738 if (txb->encrypted && !priv->ieee->host_encrypt) {
9739 switch (priv->ieee->sec.level) {
9740 case SEC_LEVEL_3:
9741 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9742 IEEE80211_FCTL_PROTECTED;
9743 /* XXX: ACK flag must be set for CCMP even if it
9744 * is a multicast/broadcast packet, because CCMP
9745 * group communication encrypted by GTK is
9746 * actually done by the AP. */
9747 if (!unicast)
9748 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9750 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9751 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9752 tfd->u.data.key_index = 0;
9753 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9754 break;
9755 case SEC_LEVEL_2:
9756 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9757 IEEE80211_FCTL_PROTECTED;
9758 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9759 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9760 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9761 break;
9762 case SEC_LEVEL_1:
9763 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9764 IEEE80211_FCTL_PROTECTED;
9765 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9766 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9768 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9769 else
9770 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9771 break;
9772 case SEC_LEVEL_0:
9773 break;
9774 default:
9775 printk(KERN_ERR "Unknown security level %d\n",
9776 priv->ieee->sec.level);
9777 break;
9779 } else
9780 /* No hardware encryption */
9781 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9783 #ifdef CONFIG_IPW_QOS
9784 if (fc & IEEE80211_STYPE_QOS_DATA)
9785 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
9786 #endif /* CONFIG_IPW_QOS */
9788 /* payload */
9789 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9790 txb->nr_frags));
9791 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9792 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9793 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9794 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9795 i, le32_to_cpu(tfd->u.data.num_chunks),
9796 txb->fragments[i]->len - hdr_len);
9797 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9798 i, tfd->u.data.num_chunks,
9799 txb->fragments[i]->len - hdr_len);
9800 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9801 txb->fragments[i]->len - hdr_len);
9803 tfd->u.data.chunk_ptr[i] =
9804 cpu_to_le32(pci_map_single
9805 (priv->pci_dev,
9806 txb->fragments[i]->data + hdr_len,
9807 txb->fragments[i]->len - hdr_len,
9808 PCI_DMA_TODEVICE));
9809 tfd->u.data.chunk_len[i] =
9810 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9813 if (i != txb->nr_frags) {
9814 struct sk_buff *skb;
9815 u16 remaining_bytes = 0;
9816 int j;
9818 for (j = i; j < txb->nr_frags; j++)
9819 remaining_bytes += txb->fragments[j]->len - hdr_len;
9821 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9822 remaining_bytes);
9823 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9824 if (skb != NULL) {
9825 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9826 for (j = i; j < txb->nr_frags; j++) {
9827 int size = txb->fragments[j]->len - hdr_len;
9829 printk(KERN_INFO "Adding frag %d %d...\n",
9830 j, size);
9831 memcpy(skb_put(skb, size),
9832 txb->fragments[j]->data + hdr_len, size);
9834 dev_kfree_skb_any(txb->fragments[i]);
9835 txb->fragments[i] = skb;
9836 tfd->u.data.chunk_ptr[i] =
9837 cpu_to_le32(pci_map_single
9838 (priv->pci_dev, skb->data,
9839 tfd->u.data.chunk_len[i],
9840 PCI_DMA_TODEVICE));
9842 tfd->u.data.num_chunks =
9843 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) + 1);
9848 /* kick DMA */
9849 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9850 ipw_write32(priv, q->reg_w, q->first_empty);
9852 if (ipw_queue_space(q) < q->high_mark)
9853 netif_stop_queue(priv->net_dev);
9855 return NETDEV_TX_OK;
9857 drop:
9858 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9859 ieee80211_txb_free(txb);
9860 return NETDEV_TX_OK;
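/*
 * ipw_net_is_queue_full(): report whether the TX queue backing the given
 * 802.11 priority is close to full, so the ieee80211 layer can throttle
 * before handing us more frames.  With CONFIG_IPW_QOS the priority is
 * mapped to one of the per-AC queues; otherwise queue 0 is always used.
 */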
9863 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9865 struct ipw_priv *priv = ieee80211_priv(dev);
9866 #ifdef CONFIG_IPW_QOS
9867 int tx_id = ipw_get_tx_queue_number(priv, pri);
9868 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9869 #else
9870 struct clx2_tx_queue *txq = &priv->txq[0];
9871 #endif /* CONFIG_IPW_QOS */
9873 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9874 return 1;
9876 return 0;
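/*
 * ipw_net_hard_start_xmit(): transmit hook called by the ieee80211 layer.
 * Frames are only handed to ipw_tx_skb() while associated; otherwise the
 * queue is stopped and the frame is counted as a carrier error.
 */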
9879 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9880 struct net_device *dev, int pri)
9882 struct ipw_priv *priv = ieee80211_priv(dev);
9883 unsigned long flags;
9884 int ret;
9886 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9887 spin_lock_irqsave(&priv->lock, flags);
9889 if (!(priv->status & STATUS_ASSOCIATED)) {
9890 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9891 priv->ieee->stats.tx_carrier_errors++;
9892 netif_stop_queue(dev);
9893 goto fail_unlock;
9896 ret = ipw_tx_skb(priv, txb, pri);
9897 if (ret == NETDEV_TX_OK)
9898 __ipw_led_activity_on(priv);
9899 spin_unlock_irqrestore(&priv->lock, flags);
9901 return ret;
9903 fail_unlock:
9904 spin_unlock_irqrestore(&priv->lock, flags);
9905 return 1;
9908 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9910 struct ipw_priv *priv = ieee80211_priv(dev);
9912 priv->ieee->stats.tx_packets = priv->tx_packets;
9913 priv->ieee->stats.rx_packets = priv->rx_packets;
9914 return &priv->ieee->stats;
9917 static void ipw_net_set_multicast_list(struct net_device *dev)
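/*
 * ipw_net_set_mac_address(): store a user-supplied MAC address and
 * schedule an adapter restart so the firmware is reloaded with the new
 * address in place.
 */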
9922 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9924 struct ipw_priv *priv = ieee80211_priv(dev);
9925 struct sockaddr *addr = p;
9926 if (!is_valid_ether_addr(addr->sa_data))
9927 return -EADDRNOTAVAIL;
9928 mutex_lock(&priv->mutex);
9929 priv->config |= CFG_CUSTOM_MAC;
9930 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9931 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9932 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9933 queue_work(priv->workqueue, &priv->adapter_restart);
9934 mutex_unlock(&priv->mutex);
9935 return 0;
9938 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9939 struct ethtool_drvinfo *info)
9941 struct ipw_priv *p = ieee80211_priv(dev);
9942 char vers[64];
9943 char date[32];
9944 u32 len;
9946 strcpy(info->driver, DRV_NAME);
9947 strcpy(info->version, DRV_VERSION);
9949 len = sizeof(vers);
9950 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9951 len = sizeof(date);
9952 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9954 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9955 vers, date);
9956 strcpy(info->bus_info, pci_name(p->pci_dev));
9957 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9960 static u32 ipw_ethtool_get_link(struct net_device *dev)
9962 struct ipw_priv *priv = ieee80211_priv(dev);
9963 return (priv->status & STATUS_ASSOCIATED) != 0;
9966 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
9968 return IPW_EEPROM_IMAGE_SIZE;
9971 static int ipw_ethtool_get_eeprom(struct net_device *dev,
9972 struct ethtool_eeprom *eeprom, u8 * bytes)
9974 struct ipw_priv *p = ieee80211_priv(dev);
9976 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9977 return -EINVAL;
9978 mutex_lock(&p->mutex);
9979 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9980 mutex_unlock(&p->mutex);
9981 return 0;
9984 static int ipw_ethtool_set_eeprom(struct net_device *dev,
9985 struct ethtool_eeprom *eeprom, u8 * bytes)
9987 struct ipw_priv *p = ieee80211_priv(dev);
9988 int i;
9990 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9991 return -EINVAL;
9992 mutex_lock(&p->mutex);
9993 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9994 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
9995 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
9996 mutex_unlock(&p->mutex);
9997 return 0;
10000 static struct ethtool_ops ipw_ethtool_ops = {
10001 .get_link = ipw_ethtool_get_link,
10002 .get_drvinfo = ipw_ethtool_get_drvinfo,
10003 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10004 .get_eeprom = ipw_ethtool_get_eeprom,
10005 .set_eeprom = ipw_ethtool_set_eeprom,
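/*
 * ipw_isr(): top-half interrupt handler.  Mask further interrupts,
 * acknowledge and cache the pending INTA bits, then defer the real work
 * to the IRQ tasklet.  Returns IRQ_NONE for shared-IRQ invocations that
 * are not ours.
 */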
10008 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10010 struct ipw_priv *priv = data;
10011 u32 inta, inta_mask;
10013 if (!priv)
10014 return IRQ_NONE;
10016 spin_lock(&priv->lock);
10018 if (!(priv->status & STATUS_INT_ENABLED)) {
10019 /* Shared IRQ */
10020 goto none;
10023 inta = ipw_read32(priv, IPW_INTA_RW);
10024 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10026 if (inta == 0xFFFFFFFF) {
10027 /* Hardware disappeared */
10028 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10029 goto none;
10032 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10033 /* Shared interrupt */
10034 goto none;
10037 /* tell the device to stop sending interrupts */
10038 ipw_disable_interrupts(priv);
10040 /* ack current interrupts */
10041 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10042 ipw_write32(priv, IPW_INTA_RW, inta);
10044 /* Cache INTA value for our tasklet */
10045 priv->isr_inta = inta;
10047 tasklet_schedule(&priv->irq_tasklet);
10049 spin_unlock(&priv->lock);
10051 return IRQ_HANDLED;
10052 none:
10053 spin_unlock(&priv->lock);
10054 return IRQ_NONE;
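/*
 * ipw_rf_kill(): poll the hardware RF-kill switch.  While the switch is
 * active the check is rescheduled every two seconds; once it is released
 * (and no software RF-kill is set) an adapter restart is queued to bring
 * the radio back up.
 */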
10057 static void ipw_rf_kill(void *adapter)
10059 struct ipw_priv *priv = adapter;
10060 unsigned long flags;
10062 spin_lock_irqsave(&priv->lock, flags);
10064 if (rf_kill_active(priv)) {
10065 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10066 if (priv->workqueue)
10067 queue_delayed_work(priv->workqueue,
10068 &priv->rf_kill, 2 * HZ);
10069 goto exit_unlock;
10072 /* RF Kill is now disabled, so bring the device back up */
10074 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10075 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10076 "device\n");
10078 /* we can not do an adapter restart while inside an irq lock */
10079 queue_work(priv->workqueue, &priv->adapter_restart);
10080 } else
10081 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10082 "enabled\n");
10084 exit_unlock:
10085 spin_unlock_irqrestore(&priv->lock, flags);
10088 static void ipw_bg_rf_kill(void *data)
10090 struct ipw_priv *priv = data;
10091 mutex_lock(&priv->mutex);
10092 ipw_rf_kill(data);
10093 mutex_unlock(&priv->mutex);
10096 static void ipw_link_up(struct ipw_priv *priv)
10098 priv->last_seq_num = -1;
10099 priv->last_frag_num = -1;
10100 priv->last_packet_time = 0;
10102 netif_carrier_on(priv->net_dev);
10103 if (netif_queue_stopped(priv->net_dev)) {
10104 IPW_DEBUG_NOTIF("waking queue\n");
10105 netif_wake_queue(priv->net_dev);
10106 } else {
10107 IPW_DEBUG_NOTIF("starting queue\n");
10108 netif_start_queue(priv->net_dev);
10111 cancel_delayed_work(&priv->request_scan);
10112 ipw_reset_stats(priv);
10113 /* Ensure the rate is updated immediately */
10114 priv->last_rate = ipw_get_current_rate(priv);
10115 ipw_gather_stats(priv);
10116 ipw_led_link_up(priv);
10117 notify_wx_assoc_event(priv);
10119 if (priv->config & CFG_BACKGROUND_SCAN)
10120 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10123 static void ipw_bg_link_up(void *data)
10125 struct ipw_priv *priv = data;
10126 mutex_lock(&priv->mutex);
10127 ipw_link_up(data);
10128 mutex_unlock(&priv->mutex);
10131 static void ipw_link_down(struct ipw_priv *priv)
10133 ipw_led_link_down(priv);
10134 netif_carrier_off(priv->net_dev);
10135 netif_stop_queue(priv->net_dev);
10136 notify_wx_assoc_event(priv);
10138 /* Cancel any queued work ... */
10139 cancel_delayed_work(&priv->request_scan);
10140 cancel_delayed_work(&priv->adhoc_check);
10141 cancel_delayed_work(&priv->gather_stats);
10143 ipw_reset_stats(priv);
10145 if (!(priv->status & STATUS_EXIT_PENDING)) {
10146 /* Queue up another scan... */
10147 queue_work(priv->workqueue, &priv->request_scan);
10151 static void ipw_bg_link_down(void *data)
10153 struct ipw_priv *priv = data;
10154 mutex_lock(&priv->mutex);
10155 ipw_link_down(data);
10156 mutex_unlock(&priv->mutex);
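/*
 * ipw_setup_deferred_work(): create the driver workqueue and register
 * every deferred-work handler and the IRQ tasklet used by the driver.
 */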
10159 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10161 int ret = 0;
10163 priv->workqueue = create_workqueue(DRV_NAME);
10164 init_waitqueue_head(&priv->wait_command_queue);
10165 init_waitqueue_head(&priv->wait_state);
10167 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10168 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10169 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10170 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10171 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10172 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10173 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10174 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10175 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10176 INIT_WORK(&priv->request_scan,
10177 (void (*)(void *))ipw_request_scan, priv);
10178 INIT_WORK(&priv->gather_stats,
10179 (void (*)(void *))ipw_bg_gather_stats, priv);
10180 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10181 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10182 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10183 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10184 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10185 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10186 priv);
10187 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10188 priv);
10189 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10190 priv);
10191 INIT_WORK(&priv->merge_networks,
10192 (void (*)(void *))ipw_merge_adhoc_network, priv);
10194 #ifdef CONFIG_IPW_QOS
10195 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10196 priv);
10197 #endif /* CONFIG_IPW_QOS */
10199 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10200 ipw_irq_tasklet, (unsigned long)priv);
10202 return ret;
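/*
 * shim__set_security(): ieee80211 set_security callback.  Copy the
 * supplicant-provided keys, active key index, auth mode, privacy and
 * security level into priv->ieee->sec and, when hardware crypto is in
 * use, push the keys down to the firmware.
 */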
10205 static void shim__set_security(struct net_device *dev,
10206 struct ieee80211_security *sec)
10208 struct ipw_priv *priv = ieee80211_priv(dev);
10209 int i;
10210 for (i = 0; i < 4; i++) {
10211 if (sec->flags & (1 << i)) {
10212 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10213 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10214 if (sec->key_sizes[i] == 0)
10215 priv->ieee->sec.flags &= ~(1 << i);
10216 else {
10217 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10218 sec->key_sizes[i]);
10219 priv->ieee->sec.flags |= (1 << i);
10221 priv->status |= STATUS_SECURITY_UPDATED;
10222 } else if (sec->level != SEC_LEVEL_1)
10223 priv->ieee->sec.flags &= ~(1 << i);
10226 if (sec->flags & SEC_ACTIVE_KEY) {
10227 if (sec->active_key <= 3) {
10228 priv->ieee->sec.active_key = sec->active_key;
10229 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10230 } else
10231 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10232 priv->status |= STATUS_SECURITY_UPDATED;
10233 } else
10234 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10236 if ((sec->flags & SEC_AUTH_MODE) &&
10237 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10238 priv->ieee->sec.auth_mode = sec->auth_mode;
10239 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10240 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10241 priv->capability |= CAP_SHARED_KEY;
10242 else
10243 priv->capability &= ~CAP_SHARED_KEY;
10244 priv->status |= STATUS_SECURITY_UPDATED;
10247 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10248 priv->ieee->sec.flags |= SEC_ENABLED;
10249 priv->ieee->sec.enabled = sec->enabled;
10250 priv->status |= STATUS_SECURITY_UPDATED;
10251 if (sec->enabled)
10252 priv->capability |= CAP_PRIVACY_ON;
10253 else
10254 priv->capability &= ~CAP_PRIVACY_ON;
10257 if (sec->flags & SEC_ENCRYPT)
10258 priv->ieee->sec.encrypt = sec->encrypt;
10260 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10261 priv->ieee->sec.level = sec->level;
10262 priv->ieee->sec.flags |= SEC_LEVEL;
10263 priv->status |= STATUS_SECURITY_UPDATED;
10266 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10267 ipw_set_hwcrypto_keys(priv);
10269 /* To match the current functionality of ipw2100 (which works well w/
10270 * various supplicants), we don't force a disassociate if the
10271 * privacy capability changes ... */
10272 #if 0
10273 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10274 (((priv->assoc_request.capability &
10275 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10276 (!(priv->assoc_request.capability &
10277 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10278 IPW_DEBUG_ASSOC("Disassociating due to capability "
10279 "change.\n");
10280 ipw_disassociate(priv);
10282 #endif
10285 static int init_supported_rates(struct ipw_priv *priv,
10286 struct ipw_supported_rates *rates)
10288 /* TODO: Mask out rates based on priv->rates_mask */
10290 memset(rates, 0, sizeof(*rates));
10291 /* configure supported rates */
10292 switch (priv->ieee->freq_band) {
10293 case IEEE80211_52GHZ_BAND:
10294 rates->ieee_mode = IPW_A_MODE;
10295 rates->purpose = IPW_RATE_CAPABILITIES;
10296 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10297 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10298 break;
10300 default: /* Mixed or 2.4 GHz */
10301 rates->ieee_mode = IPW_G_MODE;
10302 rates->purpose = IPW_RATE_CAPABILITIES;
10303 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10304 IEEE80211_CCK_DEFAULT_RATES_MASK);
10305 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10306 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10307 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10309 break;
10312 return 0;
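/*
 * ipw_config(): push the run-time configuration (TX power, adapter
 * address, system config, supported rates, RTS threshold and QoS
 * parameters) to the firmware and complete the transition to the RUN
 * state.
 */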
10315 static int ipw_config(struct ipw_priv *priv)
10317 /* This is only called from ipw_up, which resets/reloads the firmware
10318 so, we don't need to first disable the card before we configure
10319 it */
10320 if (ipw_set_tx_power(priv))
10321 goto error;
10323 /* initialize adapter address */
10324 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10325 goto error;
10327 /* set basic system config settings */
10328 init_sys_config(&priv->sys_config);
10330 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10331 * Does not support BT priority yet (don't abort or defer our Tx) */
10332 if (bt_coexist) {
10333 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10335 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10336 priv->sys_config.bt_coexistence
10337 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10338 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10339 priv->sys_config.bt_coexistence
10340 |= CFG_BT_COEXISTENCE_OOB;
10343 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10344 priv->sys_config.answer_broadcast_ssid_probe = 1;
10345 else
10346 priv->sys_config.answer_broadcast_ssid_probe = 0;
10348 if (ipw_send_system_config(priv, &priv->sys_config))
10349 goto error;
10351 init_supported_rates(priv, &priv->rates);
10352 if (ipw_send_supported_rates(priv, &priv->rates))
10353 goto error;
10355 /* Set request-to-send threshold */
10356 if (priv->rts_threshold) {
10357 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10358 goto error;
10360 #ifdef CONFIG_IPW_QOS
10361 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10362 ipw_qos_activate(priv, NULL);
10363 #endif /* CONFIG_IPW_QOS */
10365 if (ipw_set_random_seed(priv))
10366 goto error;
10368 /* final state transition to the RUN state */
10369 if (ipw_send_host_complete(priv))
10370 goto error;
10372 priv->status |= STATUS_INIT;
10374 ipw_led_init(priv);
10375 ipw_led_radio_on(priv);
10376 priv->notif_missed_beacons = 0;
10378 /* Set hardware WEP key if it is configured. */
10379 if ((priv->capability & CAP_PRIVACY_ON) &&
10380 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10381 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10382 ipw_set_hwcrypto_keys(priv);
10384 return 0;
10386 error:
10387 return -EIO;
10391 * NOTE:
10393 * These tables have been tested in conjunction with the
10394 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10396 * Altering these values, using them on other hardware, or in geographies
10397 * not intended for resale of the above-mentioned Intel adapters has
10398 * not been tested.
10400 * Remember to update the table in README.ipw2200 when changing this
10401 * table.
10404 static const struct ieee80211_geo ipw_geos[] = {
10405 { /* Restricted */
10406 "---",
10407 .bg_channels = 11,
10408 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10409 {2427, 4}, {2432, 5}, {2437, 6},
10410 {2442, 7}, {2447, 8}, {2452, 9},
10411 {2457, 10}, {2462, 11}},
10414 { /* Custom US/Canada */
10415 "ZZF",
10416 .bg_channels = 11,
10417 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10418 {2427, 4}, {2432, 5}, {2437, 6},
10419 {2442, 7}, {2447, 8}, {2452, 9},
10420 {2457, 10}, {2462, 11}},
10421 .a_channels = 8,
10422 .a = {{5180, 36},
10423 {5200, 40},
10424 {5220, 44},
10425 {5240, 48},
10426 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10427 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10428 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10429 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10432 { /* Rest of World */
10433 "ZZD",
10434 .bg_channels = 13,
10435 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10436 {2427, 4}, {2432, 5}, {2437, 6},
10437 {2442, 7}, {2447, 8}, {2452, 9},
10438 {2457, 10}, {2462, 11}, {2467, 12},
10439 {2472, 13}},
10442 { /* Custom USA & Europe & High */
10443 "ZZA",
10444 .bg_channels = 11,
10445 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10446 {2427, 4}, {2432, 5}, {2437, 6},
10447 {2442, 7}, {2447, 8}, {2452, 9},
10448 {2457, 10}, {2462, 11}},
10449 .a_channels = 13,
10450 .a = {{5180, 36},
10451 {5200, 40},
10452 {5220, 44},
10453 {5240, 48},
10454 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10455 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10456 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10457 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10458 {5745, 149},
10459 {5765, 153},
10460 {5785, 157},
10461 {5805, 161},
10462 {5825, 165}},
10465 { /* Custom NA & Europe */
10466 "ZZB",
10467 .bg_channels = 11,
10468 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10469 {2427, 4}, {2432, 5}, {2437, 6},
10470 {2442, 7}, {2447, 8}, {2452, 9},
10471 {2457, 10}, {2462, 11}},
10472 .a_channels = 13,
10473 .a = {{5180, 36},
10474 {5200, 40},
10475 {5220, 44},
10476 {5240, 48},
10477 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10478 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10479 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10480 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10481 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10482 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10483 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10484 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10485 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10488 { /* Custom Japan */
10489 "ZZC",
10490 .bg_channels = 11,
10491 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10492 {2427, 4}, {2432, 5}, {2437, 6},
10493 {2442, 7}, {2447, 8}, {2452, 9},
10494 {2457, 10}, {2462, 11}},
10495 .a_channels = 4,
10496 .a = {{5170, 34}, {5190, 38},
10497 {5210, 42}, {5230, 46}},
10500 { /* Custom */
10501 "ZZM",
10502 .bg_channels = 11,
10503 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10504 {2427, 4}, {2432, 5}, {2437, 6},
10505 {2442, 7}, {2447, 8}, {2452, 9},
10506 {2457, 10}, {2462, 11}},
10509 { /* Europe */
10510 "ZZE",
10511 .bg_channels = 13,
10512 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10513 {2427, 4}, {2432, 5}, {2437, 6},
10514 {2442, 7}, {2447, 8}, {2452, 9},
10515 {2457, 10}, {2462, 11}, {2467, 12},
10516 {2472, 13}},
10517 .a_channels = 19,
10518 .a = {{5180, 36},
10519 {5200, 40},
10520 {5220, 44},
10521 {5240, 48},
10522 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10523 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10524 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10525 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10526 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10527 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10528 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10529 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10530 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10531 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10532 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10533 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10534 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10535 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10536 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10539 { /* Custom Japan */
10540 "ZZJ",
10541 .bg_channels = 14,
10542 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10543 {2427, 4}, {2432, 5}, {2437, 6},
10544 {2442, 7}, {2447, 8}, {2452, 9},
10545 {2457, 10}, {2462, 11}, {2467, 12},
10546 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10547 .a_channels = 4,
10548 .a = {{5170, 34}, {5190, 38},
10549 {5210, 42}, {5230, 46}},
10552 { /* Rest of World */
10553 "ZZR",
10554 .bg_channels = 14,
10555 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10556 {2427, 4}, {2432, 5}, {2437, 6},
10557 {2442, 7}, {2447, 8}, {2452, 9},
10558 {2457, 10}, {2462, 11}, {2467, 12},
10559 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10560 IEEE80211_CH_PASSIVE_ONLY}},
10563 { /* High Band */
10564 "ZZH",
10565 .bg_channels = 13,
10566 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10567 {2427, 4}, {2432, 5}, {2437, 6},
10568 {2442, 7}, {2447, 8}, {2452, 9},
10569 {2457, 10}, {2462, 11},
10570 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10571 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10572 .a_channels = 4,
10573 .a = {{5745, 149}, {5765, 153},
10574 {5785, 157}, {5805, 161}},
10577 { /* Custom Europe */
10578 "ZZG",
10579 .bg_channels = 13,
10580 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10581 {2427, 4}, {2432, 5}, {2437, 6},
10582 {2442, 7}, {2447, 8}, {2452, 9},
10583 {2457, 10}, {2462, 11},
10584 {2467, 12}, {2472, 13}},
10585 .a_channels = 4,
10586 .a = {{5180, 36}, {5200, 40},
10587 {5220, 44}, {5240, 48}},
10590 { /* Europe */
10591 "ZZK",
10592 .bg_channels = 13,
10593 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10594 {2427, 4}, {2432, 5}, {2437, 6},
10595 {2442, 7}, {2447, 8}, {2452, 9},
10596 {2457, 10}, {2462, 11},
10597 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10598 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10599 .a_channels = 24,
10600 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10601 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10602 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10603 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10604 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10605 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10606 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10607 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10608 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10609 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10610 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10611 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10612 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10613 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10614 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10615 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10616 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10617 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10618 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10619 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10620 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10621 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10622 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10623 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10626 { /* Europe */
10627 "ZZL",
10628 .bg_channels = 11,
10629 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10630 {2427, 4}, {2432, 5}, {2437, 6},
10631 {2442, 7}, {2447, 8}, {2452, 9},
10632 {2457, 10}, {2462, 11}},
10633 .a_channels = 13,
10634 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10635 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10636 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10637 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10638 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10639 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10640 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10641 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10642 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10643 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10644 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10645 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10646 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
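/*
 * ipw_up(): bring the device up.  Load the firmware, pick the geography
 * matching the EEPROM country code, honour any RF-kill state, and then
 * configure the adapter, retrying the load/config sequence up to
 * MAX_HW_RESTARTS times before giving up.
 */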
10650 #define MAX_HW_RESTARTS 5
10651 static int ipw_up(struct ipw_priv *priv)
10653 int rc, i, j;
10655 if (priv->status & STATUS_EXIT_PENDING)
10656 return -EIO;
10658 if (cmdlog && !priv->cmdlog) {
10659 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10660 GFP_KERNEL);
10661 if (priv->cmdlog == NULL) {
10662 IPW_ERROR("Error allocating %d command log entries.\n",
10663 cmdlog);
10664 } else {
10665 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10666 priv->cmdlog_len = cmdlog;
10670 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10671 /* Load the microcode, firmware, and eeprom.
10672 * Also start the clocks. */
10673 rc = ipw_load(priv);
10674 if (rc) {
10675 IPW_ERROR("Unable to load firmware: %d\n", rc);
10676 return rc;
10679 ipw_init_ordinals(priv);
10680 if (!(priv->config & CFG_CUSTOM_MAC))
10681 eeprom_parse_mac(priv, priv->mac_addr);
10682 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10684 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10685 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10686 ipw_geos[j].name, 3))
10687 break;
10689 if (j == ARRAY_SIZE(ipw_geos)) {
10690 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10691 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10692 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10693 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10694 j = 0;
10696 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
10697 IPW_WARNING("Could not set geography.");
10698 return 0;
10701 if (priv->status & STATUS_RF_KILL_SW) {
10702 IPW_WARNING("Radio disabled by module parameter.\n");
10703 return 0;
10704 } else if (rf_kill_active(priv)) {
10705 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10706 "Kill switch must be turned off for "
10707 "wireless networking to work.\n");
10708 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10709 2 * HZ);
10710 return 0;
10713 rc = ipw_config(priv);
10714 if (!rc) {
10715 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10717 /* If configured to try and auto-associate, kick
10718 * off a scan. */
10719 queue_work(priv->workqueue, &priv->request_scan);
10721 return 0;
10724 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10725 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10726 i, MAX_HW_RESTARTS);
10728 /* We had an error bringing up the hardware, so take it
10729 * all the way back down so we can try again */
10730 ipw_down(priv);
10733 /* tried to restart and config the device for as long as our
10734 * patience could withstand */
10735 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10737 return -EIO;
10740 static void ipw_bg_up(void *data)
10742 struct ipw_priv *priv = data;
10743 mutex_lock(&priv->mutex);
10744 ipw_up(data);
10745 mutex_unlock(&priv->mutex);
10748 static void ipw_deinit(struct ipw_priv *priv)
10750 int i;
10752 if (priv->status & STATUS_SCANNING) {
10753 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10754 ipw_abort_scan(priv);
10757 if (priv->status & STATUS_ASSOCIATED) {
10758 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10759 ipw_disassociate(priv);
10762 ipw_led_shutdown(priv);
10764 /* Wait up to 1s for status to change to not scanning and not
10765 * associated (disassociation can take a while for a full 802.11
10766 * exchange). */
10767 for (i = 1000; i && (priv->status &
10768 (STATUS_DISASSOCIATING |
10769 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10770 udelay(10);
10772 if (priv->status & (STATUS_DISASSOCIATING |
10773 STATUS_ASSOCIATED | STATUS_SCANNING))
10774 IPW_DEBUG_INFO("Still associated or scanning...\n");
10775 else
10776 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10778 /* Attempt to disable the card */
10779 ipw_send_card_disable(priv, 0);
10781 priv->status &= ~STATUS_INIT;
10784 static void ipw_down(struct ipw_priv *priv)
10786 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10788 priv->status |= STATUS_EXIT_PENDING;
10790 if (ipw_is_init(priv))
10791 ipw_deinit(priv);
10793 /* Wipe out the EXIT_PENDING status bit if we are not actually
10794 * exiting the module */
10795 if (!exit_pending)
10796 priv->status &= ~STATUS_EXIT_PENDING;
10798 /* tell the device to stop sending interrupts */
10799 ipw_disable_interrupts(priv);
10801 /* Clear all bits but the RF Kill */
10802 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10803 netif_carrier_off(priv->net_dev);
10804 netif_stop_queue(priv->net_dev);
10806 ipw_stop_nic(priv);
10808 ipw_led_radio_off(priv);
10811 static void ipw_bg_down(void *data)
10813 struct ipw_priv *priv = data;
10814 mutex_lock(&priv->mutex);
10815 ipw_down(data);
10816 mutex_unlock(&priv->mutex);
10819 /* Called by register_netdev() */
10820 static int ipw_net_init(struct net_device *dev)
10822 struct ipw_priv *priv = ieee80211_priv(dev);
10823 mutex_lock(&priv->mutex);
10825 if (ipw_up(priv)) {
10826 mutex_unlock(&priv->mutex);
10827 return -EIO;
10830 mutex_unlock(&priv->mutex);
10831 return 0;
10834 /* PCI driver stuff */
10835 static struct pci_device_id card_ids[] = {
10836 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10837 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10838 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10839 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10840 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10841 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10842 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10843 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10844 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10845 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10846 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10847 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10848 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10849 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10850 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10851 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10852 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10853 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10854 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10855 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10856 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10857 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10859 /* required last entry */
10860 {0,}
10863 MODULE_DEVICE_TABLE(pci, card_ids);
10865 static struct attribute *ipw_sysfs_entries[] = {
10866 &dev_attr_rf_kill.attr,
10867 &dev_attr_direct_dword.attr,
10868 &dev_attr_indirect_byte.attr,
10869 &dev_attr_indirect_dword.attr,
10870 &dev_attr_mem_gpio_reg.attr,
10871 &dev_attr_command_event_reg.attr,
10872 &dev_attr_nic_type.attr,
10873 &dev_attr_status.attr,
10874 &dev_attr_cfg.attr,
10875 &dev_attr_error.attr,
10876 &dev_attr_event_log.attr,
10877 &dev_attr_cmd_log.attr,
10878 &dev_attr_eeprom_delay.attr,
10879 &dev_attr_ucode_version.attr,
10880 &dev_attr_rtc.attr,
10881 &dev_attr_scan_age.attr,
10882 &dev_attr_led.attr,
10883 &dev_attr_speed_scan.attr,
10884 &dev_attr_net_stats.attr,
10885 NULL
10888 static struct attribute_group ipw_attribute_group = {
10889 .name = NULL, /* put in device directory */
10890 .attrs = ipw_sysfs_entries,
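/*
 * ipw_pci_probe(): allocate the ieee80211/net_device pair, map the
 * device registers, set up deferred work and the interrupt handler,
 * wire up the net_device and wireless callbacks, and finally register
 * the network device and the sysfs attribute group.
 */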
10893 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10895 int err = 0;
10896 struct net_device *net_dev;
10897 void __iomem *base;
10898 u32 length, val;
10899 struct ipw_priv *priv;
10900 int i;
10902 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
10903 if (net_dev == NULL) {
10904 err = -ENOMEM;
10905 goto out;
10908 priv = ieee80211_priv(net_dev);
10909 priv->ieee = netdev_priv(net_dev);
10911 priv->net_dev = net_dev;
10912 priv->pci_dev = pdev;
10913 #ifdef CONFIG_IPW2200_DEBUG
10914 ipw_debug_level = debug;
10915 #endif
10916 spin_lock_init(&priv->lock);
10917 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10918 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10920 mutex_init(&priv->mutex);
10921 if (pci_enable_device(pdev)) {
10922 err = -ENODEV;
10923 goto out_free_ieee80211;
10926 pci_set_master(pdev);
10928 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10929 if (!err)
10930 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
10931 if (err) {
10932 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
10933 goto out_pci_disable_device;
10936 pci_set_drvdata(pdev, priv);
10938 err = pci_request_regions(pdev, DRV_NAME);
10939 if (err)
10940 goto out_pci_disable_device;
10942 /* We disable the RETRY_TIMEOUT register (0x41) to keep
10943 * PCI Tx retries from interfering with C3 CPU state */
10944 pci_read_config_dword(pdev, 0x40, &val);
10945 if ((val & 0x0000ff00) != 0)
10946 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
10948 length = pci_resource_len(pdev, 0);
10949 priv->hw_len = length;
10951 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
10952 if (!base) {
10953 err = -ENODEV;
10954 goto out_pci_release_regions;
10957 priv->hw_base = base;
10958 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
10959 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
10961 err = ipw_setup_deferred_work(priv);
10962 if (err) {
10963 IPW_ERROR("Unable to setup deferred work\n");
10964 goto out_iounmap;
10967 ipw_sw_reset(priv, 1);
10969 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
10970 if (err) {
10971 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
10972 goto out_destroy_workqueue;
10975 SET_MODULE_OWNER(net_dev);
10976 SET_NETDEV_DEV(net_dev, &pdev->dev);
10978 mutex_lock(&priv->mutex);
10980 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
10981 priv->ieee->set_security = shim__set_security;
10982 priv->ieee->is_queue_full = ipw_net_is_queue_full;
10984 #ifdef CONFIG_IPW_QOS
10985 priv->ieee->is_qos_active = ipw_is_qos_active;
10986 priv->ieee->handle_probe_response = ipw_handle_beacon;
10987 priv->ieee->handle_beacon = ipw_handle_probe_response;
10988 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
10989 #endif /* CONFIG_IPW_QOS */
10991 priv->ieee->perfect_rssi = -20;
10992 priv->ieee->worst_rssi = -85;
10994 net_dev->open = ipw_net_open;
10995 net_dev->stop = ipw_net_stop;
10996 net_dev->init = ipw_net_init;
10997 net_dev->get_stats = ipw_net_get_stats;
10998 net_dev->set_multicast_list = ipw_net_set_multicast_list;
10999 net_dev->set_mac_address = ipw_net_set_mac_address;
11000 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11001 net_dev->wireless_data = &priv->wireless_data;
11002 net_dev->wireless_handlers = &ipw_wx_handler_def;
11003 net_dev->ethtool_ops = &ipw_ethtool_ops;
11004 net_dev->irq = pdev->irq;
11005 net_dev->base_addr = (unsigned long)priv->hw_base;
11006 net_dev->mem_start = pci_resource_start(pdev, 0);
11007 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11009 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11010 if (err) {
11011 IPW_ERROR("failed to create sysfs device attributes\n");
11012 mutex_unlock(&priv->mutex);
11013 goto out_release_irq;
11016 mutex_unlock(&priv->mutex);
11017 err = register_netdev(net_dev);
11018 if (err) {
11019 IPW_ERROR("failed to register network device\n");
11020 goto out_remove_sysfs;
11023 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11024 "channels, %d 802.11a channels)\n",
11025 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11026 priv->ieee->geo.a_channels);
11028 return 0;
11030 out_remove_sysfs:
11031 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11032 out_release_irq:
11033 free_irq(pdev->irq, priv);
11034 out_destroy_workqueue:
11035 destroy_workqueue(priv->workqueue);
11036 priv->workqueue = NULL;
11037 out_iounmap:
11038 iounmap(priv->hw_base);
11039 out_pci_release_regions:
11040 pci_release_regions(pdev);
11041 out_pci_disable_device:
11042 pci_disable_device(pdev);
11043 pci_set_drvdata(pdev, NULL);
11044 out_free_ieee80211:
11045 free_ieee80211(priv->net_dev);
11046 out:
11047 return err;
11050 static void ipw_pci_remove(struct pci_dev *pdev)
11052 struct ipw_priv *priv = pci_get_drvdata(pdev);
11053 struct list_head *p, *q;
11054 int i;
11056 if (!priv)
11057 return;
11059 mutex_lock(&priv->mutex);
11061 priv->status |= STATUS_EXIT_PENDING;
11062 ipw_down(priv);
11063 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11065 mutex_unlock(&priv->mutex);
11067 unregister_netdev(priv->net_dev);
11069 if (priv->rxq) {
11070 ipw_rx_queue_free(priv, priv->rxq);
11071 priv->rxq = NULL;
11073 ipw_tx_queue_free(priv);
11075 if (priv->cmdlog) {
11076 kfree(priv->cmdlog);
11077 priv->cmdlog = NULL;
11079 /* ipw_down will ensure that there is no more pending work
11080 * in the workqueue, so we can safely remove it now. */
11081 cancel_delayed_work(&priv->adhoc_check);
11082 cancel_delayed_work(&priv->gather_stats);
11083 cancel_delayed_work(&priv->request_scan);
11084 cancel_delayed_work(&priv->rf_kill);
11085 cancel_delayed_work(&priv->scan_check);
11086 destroy_workqueue(priv->workqueue);
11087 priv->workqueue = NULL;
11089 /* Free MAC hash list for ADHOC */
11090 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11091 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11092 list_del(p);
11093 kfree(list_entry(p, struct ipw_ibss_seq, list));
11097 if (priv->error) {
11098 ipw_free_error_log(priv->error);
11099 priv->error = NULL;
11102 free_irq(pdev->irq, priv);
11103 iounmap(priv->hw_base);
11104 pci_release_regions(pdev);
11105 pci_disable_device(pdev);
11106 pci_set_drvdata(pdev, NULL);
11107 free_ieee80211(priv->net_dev);
11108 free_firmware();
11111 #ifdef CONFIG_PM
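/*
 * Power management: suspend takes the device fully down and detaches the
 * net_device; resume restores PCI state, re-applies the RETRY_TIMEOUT
 * workaround and queues the "up" work to reload the firmware.
 */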
11112 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11114 struct ipw_priv *priv = pci_get_drvdata(pdev);
11115 struct net_device *dev = priv->net_dev;
11117 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11119 /* Take down the device; powers it off, etc. */
11120 ipw_down(priv);
11122 /* Remove the PRESENT state of the device */
11123 netif_device_detach(dev);
11125 pci_save_state(pdev);
11126 pci_disable_device(pdev);
11127 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11129 return 0;
11132 static int ipw_pci_resume(struct pci_dev *pdev)
11134 struct ipw_priv *priv = pci_get_drvdata(pdev);
11135 struct net_device *dev = priv->net_dev;
11136 u32 val;
11138 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11140 pci_set_power_state(pdev, PCI_D0);
11141 pci_enable_device(pdev);
11142 pci_restore_state(pdev);
11145 * Suspend/Resume resets the PCI configuration space, so we have to
11146 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11147 * from interfering with C3 CPU state. pci_restore_state won't help
11148 * here since it only restores the first 64 bytes of the PCI config header.
11150 pci_read_config_dword(pdev, 0x40, &val);
11151 if ((val & 0x0000ff00) != 0)
11152 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11154 /* Set the device back into the PRESENT state; this will also wake
11155 * the queue if needed. */
11156 netif_device_attach(dev);
11158 /* Bring the device back up */
11159 queue_work(priv->workqueue, &priv->up);
11161 return 0;
11163 #endif
11165 /* driver initialization stuff */
11166 static struct pci_driver ipw_driver = {
11167 .name = DRV_NAME,
11168 .id_table = card_ids,
11169 .probe = ipw_pci_probe,
11170 .remove = __devexit_p(ipw_pci_remove),
11171 #ifdef CONFIG_PM
11172 .suspend = ipw_pci_suspend,
11173 .resume = ipw_pci_resume,
11174 #endif
11177 static int __init ipw_init(void)
11179 int ret;
11181 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11182 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11184 ret = pci_module_init(&ipw_driver);
11185 if (ret) {
11186 IPW_ERROR("Unable to initialize PCI module\n");
11187 return ret;
11190 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11191 if (ret) {
11192 IPW_ERROR("Unable to create driver sysfs file\n");
11193 pci_unregister_driver(&ipw_driver);
11194 return ret;
11197 return ret;
11200 static void __exit ipw_exit(void)
11202 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11203 pci_unregister_driver(&ipw_driver);
11206 module_param(disable, int, 0444);
11207 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11209 module_param(associate, int, 0444);
11210 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11212 module_param(auto_create, int, 0444);
11213 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11215 module_param(led, int, 0444);
11216 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11218 #ifdef CONFIG_IPW2200_DEBUG
11219 module_param(debug, int, 0444);
11220 MODULE_PARM_DESC(debug, "debug output mask");
11221 #endif
11223 module_param(channel, int, 0444);
11224 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11226 #ifdef CONFIG_IPW_QOS
11227 module_param(qos_enable, int, 0444);
11228 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11230 module_param(qos_burst_enable, int, 0444);
11231 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11233 module_param(qos_no_ack_mask, int, 0444);
11234 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11236 module_param(burst_duration_CCK, int, 0444);
11237 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11239 module_param(burst_duration_OFDM, int, 0444);
11240 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11241 #endif /* CONFIG_IPW_QOS */
11243 #ifdef CONFIG_IPW2200_MONITOR
11244 module_param(mode, int, 0444);
11245 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11246 #else
11247 module_param(mode, int, 0444);
11248 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11249 #endif
11251 module_param(bt_coexist, int, 0444);
11252 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11254 module_param(hwcrypto, int, 0444);
11255 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11257 module_param(cmdlog, int, 0444);
11258 MODULE_PARM_DESC(cmdlog,
11259 "allocate a ring buffer for logging firmware commands");
11261 module_param(roaming, int, 0444);
11262 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11264 module_param(antenna, int, 0444);
11265 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (use the antenna with the lower background noise), 3=Aux");
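/*
 * Example (illustrative only, not from the original driver docs): the
 * antenna selection added by this patch can be chosen at load time, e.g.
 *
 *     modprobe ipw2200 antenna=1
 *
 * where 0 selects both antennas (the default), 1 the main antenna,
 * 2 slow diversity and 3 the auxiliary antenna, matching the
 * CFG_SYS_ANTENNA_* values used by the driver.
 */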
11267 module_exit(ipw_exit);
11268 module_init(ipw_init);