1 /******************************************************************************
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
36 #define IPW2200_VERSION "git-1.0.10"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int bt_coexist = 0;
59 static int hwcrypto = 0;
60 static int roaming = 1;
61 static const char ipw_modes[] = {
62 'a', 'b', 'g', '?'
65 #ifdef CONFIG_IPW_QOS
66 static int qos_enable = 0;
67 static int qos_burst_enable = 0;
68 static int qos_no_ack_mask = 0;
69 static int burst_duration_CCK = 0;
70 static int burst_duration_OFDM = 0;
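/* The four tables below give per-TX-queue contention parameters (CWmin,
 * CWmax, AIFS, ACM flag and TXOP limit) for OFDM and CCK rates, in a QoS
 * and a default ("def_parameters") variant, using the QOS_TXn_* and
 * DEF_TXn_* constants. */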
72 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
73 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
74 QOS_TX3_CW_MIN_OFDM},
75 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
76 QOS_TX3_CW_MAX_OFDM},
77 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
78 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
79 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
80 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
83 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
84 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
85 QOS_TX3_CW_MIN_CCK},
86 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
87 QOS_TX3_CW_MAX_CCK},
88 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
89 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
90 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
91 QOS_TX3_TXOP_LIMIT_CCK}
94 static struct ieee80211_qos_parameters def_parameters_OFDM = {
95 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
96 DEF_TX3_CW_MIN_OFDM},
97 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
98 DEF_TX3_CW_MAX_OFDM},
99 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
100 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
101 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
102 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
105 static struct ieee80211_qos_parameters def_parameters_CCK = {
106 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
107 DEF_TX3_CW_MIN_CCK},
108 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
109 DEF_TX3_CW_MAX_CCK},
110 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
111 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
112 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
113 DEF_TX3_TXOP_LIMIT_CCK}
116 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
118 static int from_priority_to_tx_queue[] = {
119 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
120 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
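/* Maps the eight QoS priorities (0..7) onto the four hardware TX queues:
 * priorities 0/3 share queue 1, 1/2 queue 2, 4/5 queue 3 and 6/7 queue 4. */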
123 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
125 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
126 *qos_param);
127 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
128 *qos_param);
129 #endif /* CONFIG_IPW_QOS */
131 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
132 static void ipw_remove_current_network(struct ipw_priv *priv);
133 static void ipw_rx(struct ipw_priv *priv);
134 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
135 struct clx2_tx_queue *txq, int qindex);
136 static int ipw_queue_reset(struct ipw_priv *priv);
138 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
139 int len, int sync);
141 static void ipw_tx_queue_free(struct ipw_priv *);
143 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
144 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
145 static void ipw_rx_queue_replenish(void *);
146 static int ipw_up(struct ipw_priv *);
147 static void ipw_bg_up(void *);
148 static void ipw_down(struct ipw_priv *);
149 static void ipw_bg_down(void *);
150 static int ipw_config(struct ipw_priv *);
151 static int init_supported_rates(struct ipw_priv *priv,
152 struct ipw_supported_rates *prates);
153 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
154 static void ipw_send_wep_keys(struct ipw_priv *, int);
156 static int ipw_is_valid_channel(struct ieee80211_device *, u8);
157 static int ipw_channel_to_index(struct ieee80211_device *, u8);
158 static u8 ipw_freq_to_channel(struct ieee80211_device *, u32);
159 static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *);
160 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *);
162 static int snprint_line(char *buf, size_t count,
163 const u8 * data, u32 len, u32 ofs)
165 int out, i, j, l;
166 char c;
168 out = snprintf(buf, count, "%08X", ofs);
170 for (l = 0, i = 0; i < 2; i++) {
171 out += snprintf(buf + out, count - out, " ");
172 for (j = 0; j < 8 && l < len; j++, l++)
173 out += snprintf(buf + out, count - out, "%02X ",
174 data[(i * 8 + j)]);
175 for (; j < 8; j++)
176 out += snprintf(buf + out, count - out, " ");
179 out += snprintf(buf + out, count - out, " ");
180 for (l = 0, i = 0; i < 2; i++) {
181 out += snprintf(buf + out, count - out, " ");
182 for (j = 0; j < 8 && l < len; j++, l++) {
183 c = data[(i * 8 + j)];
184 if (!isascii(c) || !isprint(c))
185 c = '.';
187 out += snprintf(buf + out, count - out, "%c", c);
190 for (; j < 8; j++)
191 out += snprintf(buf + out, count - out, " ");
194 return out;
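/* snprint_line() above renders one hexdump-style line: an 8-digit hex
 * offset, up to 16 data bytes as hex in two groups of eight, then the same
 * bytes as ASCII with non-printable characters replaced by '.'. */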
197 static void printk_buf(int level, const u8 * data, u32 len)
199 char line[81];
200 u32 ofs = 0;
201 if (!(ipw_debug_level & level))
202 return;
204 while (len) {
205 snprint_line(line, sizeof(line), &data[ofs],
206 min(len, 16U), ofs);
207 printk(KERN_DEBUG "%s\n", line);
208 ofs += 16;
209 len -= min(len, 16U);
213 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
215 size_t out = size;
216 u32 ofs = 0;
217 int total = 0;
219 while (size && len) {
220 out = snprint_line(output, size, &data[ofs],
221 min_t(size_t, len, 16U), ofs);
223 ofs += 16;
224 output += out;
225 size -= out;
226 len -= min_t(size_t, len, 16U);
227 total += out;
229 return total;
232 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
233 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
234 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
236 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
237 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
238 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
240 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
241 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
242 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
244 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
245 __LINE__, (u32) (b), (u32) (c));
246 _ipw_write_reg8(a, b, c);
249 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
250 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
251 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
253 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
254 __LINE__, (u32) (b), (u32) (c));
255 _ipw_write_reg16(a, b, c);
258 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
259 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
260 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
262 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
263 __LINE__, (u32) (b), (u32) (c));
264 _ipw_write_reg32(a, b, c);
267 /* 8-bit direct write (low 4K) */
268 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
270 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
271 #define ipw_write8(ipw, ofs, val) \
272 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
273 _ipw_write8(ipw, ofs, val)
275 /* 16-bit direct write (low 4K) */
276 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
278 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
279 #define ipw_write16(ipw, ofs, val) \
280 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
281 _ipw_write16(ipw, ofs, val)
283 /* 32-bit direct write (low 4K) */
284 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
286 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
287 #define ipw_write32(ipw, ofs, val) \
288 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
289 _ipw_write32(ipw, ofs, val)
291 /* 8-bit direct read (low 4K) */
292 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
294 /* 8-bit direct read (low 4K), with debug wrapper */
295 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
297 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
298 return _ipw_read8(ipw, ofs);
301 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
302 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
304 /* 16-bit direct read (low 4K) */
305 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
307 /* 16-bit direct read (low 4K), with debug wrapper */
308 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
310 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
311 return _ipw_read16(ipw, ofs);
314 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
315 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
317 /* 32-bit direct read (low 4K) */
318 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
320 /* 32-bit direct read (low 4K), with debug wrapper */
321 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
323 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
324 return _ipw_read32(ipw, ofs);
327 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
328 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
330 /* multi-byte read (above 4K), with debug wrapper */
331 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
332 static inline void __ipw_read_indirect(const char *f, int l,
333 struct ipw_priv *a, u32 b, u8 * c, int d)
335 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
337 _ipw_read_indirect(a, b, c, d);
340 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
341 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
343 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
344 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
345 int num);
346 #define ipw_write_indirect(a, b, c, d) \
347 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
348 _ipw_write_indirect(a, b, c, d)
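/* Register access sketch (based on the accessors below): the first 4K of
 * the mapped device memory is accessed directly through hw_base, while SRAM
 * and registers above 4K go through an address/data register pair, e.g. for
 * a 32-bit read:
 *
 *   _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
 *   value = _ipw_read32(priv, IPW_INDIRECT_DATA);
 *
 * The ipw_read_reg8/16/32 and ipw_write_reg8/16/32 wrappers above only add
 * IPW_DEBUG_IO tracing around these primitives. */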
350 /* 32-bit indirect write (above 4K) */
351 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
353 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
354 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
355 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
358 /* 8-bit indirect write (above 4K) */
359 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
361 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
362 u32 dif_len = reg - aligned_addr;
364 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
365 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
366 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
369 /* 16-bit indirect write (above 4K) */
370 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
372 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
373 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
375 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
376 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
377 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
380 /* 8-bit indirect read (above 4K) */
381 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
383 u32 word;
384 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
385 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
386 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
387 return (word >> ((reg & 0x3) * 8)) & 0xff;
390 /* 32-bit indirect read (above 4K) */
391 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
393 u32 value;
395 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
397 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
398 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
399 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
400 return value;
403 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
404 /* for area above 1st 4K of SRAM/reg space */
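/* Example: a 10-byte read starting 2 bytes past a dword boundary is split */
/* into two single-byte reads (the unaligned head), then two dwords via the */
/* auto-increment registers, leaving no trailing bytes. */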
405 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
406 int num)
408 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
409 u32 dif_len = addr - aligned_addr;
410 u32 i;
412 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
414 if (num <= 0) {
415 return;
418 /* Read the first dword (or portion) byte by byte */
419 if (unlikely(dif_len)) {
420 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
421 /* Start reading at aligned_addr + dif_len */
422 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
423 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
424 aligned_addr += 4;
427 /* Read all of the middle dwords as dwords, with auto-increment */
428 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
429 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
430 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
432 /* Read the last dword (or portion) byte by byte */
433 if (unlikely(num)) {
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
435 for (i = 0; num > 0; i++, num--)
436 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
440 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
451 if (num <= 0) {
452 return;
455 /* Write the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start writing at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
460 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
461 aligned_addr += 4;
464 /* Write all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
469 /* Write the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--, buf++)
473 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for 1st 4K of SRAM/regs space */
479 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
480 int num)
482 memcpy_toio((priv->hw_base + addr), buf, num);
485 /* Set bit(s) in low 4K of SRAM/regs */
486 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
488 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
491 /* Clear bit(s) in low 4K of SRAM/regs */
492 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
494 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
497 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
499 if (priv->status & STATUS_INT_ENABLED)
500 return;
501 priv->status |= STATUS_INT_ENABLED;
502 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
505 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
507 if (!(priv->status & STATUS_INT_ENABLED))
508 return;
509 priv->status &= ~STATUS_INT_ENABLED;
510 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
513 #ifdef CONFIG_IPW2200_DEBUG
514 static char *ipw_error_desc(u32 val)
516 switch (val) {
517 case IPW_FW_ERROR_OK:
518 return "ERROR_OK";
519 case IPW_FW_ERROR_FAIL:
520 return "ERROR_FAIL";
521 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
522 return "MEMORY_UNDERFLOW";
523 case IPW_FW_ERROR_MEMORY_OVERFLOW:
524 return "MEMORY_OVERFLOW";
525 case IPW_FW_ERROR_BAD_PARAM:
526 return "BAD_PARAM";
527 case IPW_FW_ERROR_BAD_CHECKSUM:
528 return "BAD_CHECKSUM";
529 case IPW_FW_ERROR_NMI_INTERRUPT:
530 return "NMI_INTERRUPT";
531 case IPW_FW_ERROR_BAD_DATABASE:
532 return "BAD_DATABASE";
533 case IPW_FW_ERROR_ALLOC_FAIL:
534 return "ALLOC_FAIL";
535 case IPW_FW_ERROR_DMA_UNDERRUN:
536 return "DMA_UNDERRUN";
537 case IPW_FW_ERROR_DMA_STATUS:
538 return "DMA_STATUS";
539 case IPW_FW_ERROR_DINO_ERROR:
540 return "DINO_ERROR";
541 case IPW_FW_ERROR_EEPROM_ERROR:
542 return "EEPROM_ERROR";
543 case IPW_FW_ERROR_SYSASSERT:
544 return "SYSASSERT";
545 case IPW_FW_ERROR_FATAL_ERROR:
546 return "FATAL_ERROR";
547 default:
548 return "UNKNOWN_ERROR";
552 static void ipw_dump_error_log(struct ipw_priv *priv,
553 struct ipw_fw_error *error)
555 u32 i;
557 if (!error) {
558 IPW_ERROR("Error allocating and capturing error log. "
559 "Nothing to dump.\n");
560 return;
563 IPW_ERROR("Start IPW Error Log Dump:\n");
564 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
565 error->status, error->config);
567 for (i = 0; i < error->elem_len; i++)
568 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
569 ipw_error_desc(error->elem[i].desc),
570 error->elem[i].time,
571 error->elem[i].blink1,
572 error->elem[i].blink2,
573 error->elem[i].link1,
574 error->elem[i].link2, error->elem[i].data);
575 for (i = 0; i < error->log_len; i++)
576 IPW_ERROR("%i\t0x%08x\t%i\n",
577 error->log[i].time,
578 error->log[i].data, error->log[i].event);
580 #endif
582 static inline int ipw_is_init(struct ipw_priv *priv)
584 return (priv->status & STATUS_INIT) ? 1 : 0;
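/*
 * Ordinal access: an ordinal combines a table id (IPW_ORD_TABLE_ID_MASK)
 * with an index into that table (IPW_ORD_TABLE_VALUE_MASK).  Tables 0 and 1
 * hold 32-bit values, table 2 holds variable-sized records.  Typical use
 * (see show_ucode_version() further down):
 *
 *   u32 tmp = 0, len = sizeof(u32);
 *   ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len);
 */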
587 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
589 u32 addr, field_info, field_len, field_count, total_len;
591 IPW_DEBUG_ORD("ordinal = %i\n", ord);
593 if (!priv || !val || !len) {
594 IPW_DEBUG_ORD("Invalid argument\n");
595 return -EINVAL;
598 /* verify device ordinal tables have been initialized */
599 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
600 IPW_DEBUG_ORD("Access ordinals before initialization\n");
601 return -EINVAL;
604 switch (IPW_ORD_TABLE_ID_MASK & ord) {
605 case IPW_ORD_TABLE_0_MASK:
607 * TABLE 0: Direct access to a table of 32 bit values
609 * This is a very simple table with the data directly
610 * read from the table
613 /* remove the table id from the ordinal */
614 ord &= IPW_ORD_TABLE_VALUE_MASK;
616 /* boundary check */
617 if (ord > priv->table0_len) {
618 IPW_DEBUG_ORD("ordinal value (%i) longer then "
619 "max (%i)\n", ord, priv->table0_len);
620 return -EINVAL;
623 /* verify we have enough room to store the value */
624 if (*len < sizeof(u32)) {
625 IPW_DEBUG_ORD("ordinal buffer length too small, "
626 "need %zd\n", sizeof(u32));
627 return -EINVAL;
630 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
631 ord, priv->table0_addr + (ord << 2));
633 *len = sizeof(u32);
634 ord <<= 2;
635 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
636 break;
638 case IPW_ORD_TABLE_1_MASK:
640 * TABLE 1: Indirect access to a table of 32 bit values
642 * This is a fairly large table of u32 values each
643 * representing starting addr for the data (which is
644 * also a u32)
647 /* remove the table id from the ordinal */
648 ord &= IPW_ORD_TABLE_VALUE_MASK;
650 /* boundary check */
651 if (ord > priv->table1_len) {
652 IPW_DEBUG_ORD("ordinal value too long\n");
653 return -EINVAL;
656 /* verify we have enough room to store the value */
657 if (*len < sizeof(u32)) {
658 IPW_DEBUG_ORD("ordinal buffer length too small, "
659 "need %zd\n", sizeof(u32));
660 return -EINVAL;
663 *((u32 *) val) =
664 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
665 *len = sizeof(u32);
666 break;
668 case IPW_ORD_TABLE_2_MASK:
670 * TABLE 2: Indirect access to a table of variable sized values
672 * This table consists of six values, each containing
673 * - dword containing the starting offset of the data
674 * - dword containing the length in the first 16 bits
675 * and the count in the second 16 bits
678 /* remove the table id from the ordinal */
679 ord &= IPW_ORD_TABLE_VALUE_MASK;
681 /* boundary check */
682 if (ord > priv->table2_len) {
683 IPW_DEBUG_ORD("ordinal value too long\n");
684 return -EINVAL;
687 /* get the address of statistic */
688 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
690 /* get the second DW of statistics ;
691 * two 16-bit words - first is length, second is count */
692 field_info =
693 ipw_read_reg32(priv,
694 priv->table2_addr + (ord << 3) +
695 sizeof(u32));
697 /* get each entry length */
698 field_len = *((u16 *) & field_info);
700 /* get number of entries */
701 field_count = *(((u16 *) & field_info) + 1);
703 /* abort if not enough memory */
704 total_len = field_len * field_count;
705 if (total_len > *len) {
706 *len = total_len;
707 return -EINVAL;
710 *len = total_len;
711 if (!total_len)
712 return 0;
714 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
715 "field_info = 0x%08x\n",
716 addr, total_len, field_info);
717 ipw_read_indirect(priv, addr, val, total_len);
718 break;
720 default:
721 IPW_DEBUG_ORD("Invalid ordinal!\n");
722 return -EINVAL;
726 return 0;
729 static void ipw_init_ordinals(struct ipw_priv *priv)
731 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
732 priv->table0_len = ipw_read32(priv, priv->table0_addr);
734 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
735 priv->table0_addr, priv->table0_len);
737 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
738 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
740 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
741 priv->table1_addr, priv->table1_len);
743 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
744 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
745 priv->table2_len &= 0x0000ffff; /* use first two bytes */
747 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
748 priv->table2_addr, priv->table2_len);
752 static u32 ipw_register_toggle(u32 reg)
754 reg &= ~IPW_START_STANDBY;
755 if (reg & IPW_GATE_ODMA)
756 reg &= ~IPW_GATE_ODMA;
757 if (reg & IPW_GATE_IDMA)
758 reg &= ~IPW_GATE_IDMA;
759 if (reg & IPW_GATE_ADMA)
760 reg &= ~IPW_GATE_ADMA;
761 return reg;
765 * LED behavior:
766 * - On radio ON, turn on any LEDs that require to be on during start
767 * - On initialization, start unassociated blink
768 * - On association, disable unassociated blink
769 * - On disassociation, start unassociated blink
770 * - On radio OFF, turn off any LEDs started during radio on
773 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
774 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
775 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
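/* With the timings above, the unassociated blink is roughly 300ms on and
 * 2.7s off (ipw_led_link_on() schedules led_link_off and vice versa), and
 * the activity LED is held on for 250ms after the last activity. */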
777 static void ipw_led_link_on(struct ipw_priv *priv)
779 unsigned long flags;
780 u32 led;
782 /* If configured to not use LEDs, or nic_type is 1,
783 * then we don't toggle a LINK led */
784 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
785 return;
787 spin_lock_irqsave(&priv->lock, flags);
789 if (!(priv->status & STATUS_RF_KILL_MASK) &&
790 !(priv->status & STATUS_LED_LINK_ON)) {
791 IPW_DEBUG_LED("Link LED On\n");
792 led = ipw_read_reg32(priv, IPW_EVENT_REG);
793 led |= priv->led_association_on;
795 led = ipw_register_toggle(led);
797 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
798 ipw_write_reg32(priv, IPW_EVENT_REG, led);
800 priv->status |= STATUS_LED_LINK_ON;
802 /* If we aren't associated, schedule turning the LED off */
803 if (!(priv->status & STATUS_ASSOCIATED))
804 queue_delayed_work(priv->workqueue,
805 &priv->led_link_off,
806 LD_TIME_LINK_ON);
809 spin_unlock_irqrestore(&priv->lock, flags);
812 static void ipw_bg_led_link_on(void *data)
814 struct ipw_priv *priv = data;
815 mutex_lock(&priv->mutex);
816 ipw_led_link_on(data);
817 mutex_unlock(&priv->mutex);
820 static void ipw_led_link_off(struct ipw_priv *priv)
822 unsigned long flags;
823 u32 led;
825 /* If configured not to use LEDs, or nic type is 1,
826 * then we don't toggle the LINK led. */
827 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
828 return;
830 spin_lock_irqsave(&priv->lock, flags);
832 if (priv->status & STATUS_LED_LINK_ON) {
833 led = ipw_read_reg32(priv, IPW_EVENT_REG);
834 led &= priv->led_association_off;
835 led = ipw_register_toggle(led);
837 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
838 ipw_write_reg32(priv, IPW_EVENT_REG, led);
840 IPW_DEBUG_LED("Link LED Off\n");
842 priv->status &= ~STATUS_LED_LINK_ON;
844 /* If we aren't associated and the radio is on, schedule
845 * turning the LED on (blink while unassociated) */
846 if (!(priv->status & STATUS_RF_KILL_MASK) &&
847 !(priv->status & STATUS_ASSOCIATED))
848 queue_delayed_work(priv->workqueue, &priv->led_link_on,
849 LD_TIME_LINK_OFF);
853 spin_unlock_irqrestore(&priv->lock, flags);
856 static void ipw_bg_led_link_off(void *data)
858 struct ipw_priv *priv = data;
859 mutex_lock(&priv->mutex);
860 ipw_led_link_off(data);
861 mutex_unlock(&priv->mutex);
864 static void __ipw_led_activity_on(struct ipw_priv *priv)
866 u32 led;
868 if (priv->config & CFG_NO_LED)
869 return;
871 if (priv->status & STATUS_RF_KILL_MASK)
872 return;
874 if (!(priv->status & STATUS_LED_ACT_ON)) {
875 led = ipw_read_reg32(priv, IPW_EVENT_REG);
876 led |= priv->led_activity_on;
878 led = ipw_register_toggle(led);
880 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
881 ipw_write_reg32(priv, IPW_EVENT_REG, led);
883 IPW_DEBUG_LED("Activity LED On\n");
885 priv->status |= STATUS_LED_ACT_ON;
887 cancel_delayed_work(&priv->led_act_off);
888 queue_delayed_work(priv->workqueue, &priv->led_act_off,
889 LD_TIME_ACT_ON);
890 } else {
891 /* Reschedule LED off for full time period */
892 cancel_delayed_work(&priv->led_act_off);
893 queue_delayed_work(priv->workqueue, &priv->led_act_off,
894 LD_TIME_ACT_ON);
898 #if 0
899 void ipw_led_activity_on(struct ipw_priv *priv)
901 unsigned long flags;
902 spin_lock_irqsave(&priv->lock, flags);
903 __ipw_led_activity_on(priv);
904 spin_unlock_irqrestore(&priv->lock, flags);
906 #endif /* 0 */
908 static void ipw_led_activity_off(struct ipw_priv *priv)
910 unsigned long flags;
911 u32 led;
913 if (priv->config & CFG_NO_LED)
914 return;
916 spin_lock_irqsave(&priv->lock, flags);
918 if (priv->status & STATUS_LED_ACT_ON) {
919 led = ipw_read_reg32(priv, IPW_EVENT_REG);
920 led &= priv->led_activity_off;
922 led = ipw_register_toggle(led);
924 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
925 ipw_write_reg32(priv, IPW_EVENT_REG, led);
927 IPW_DEBUG_LED("Activity LED Off\n");
929 priv->status &= ~STATUS_LED_ACT_ON;
932 spin_unlock_irqrestore(&priv->lock, flags);
935 static void ipw_bg_led_activity_off(void *data)
937 struct ipw_priv *priv = data;
938 mutex_lock(&priv->mutex);
939 ipw_led_activity_off(data);
940 mutex_unlock(&priv->mutex);
943 static void ipw_led_band_on(struct ipw_priv *priv)
945 unsigned long flags;
946 u32 led;
948 /* Only nic type 1 supports mode LEDs */
949 if (priv->config & CFG_NO_LED ||
950 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
951 return;
953 spin_lock_irqsave(&priv->lock, flags);
955 led = ipw_read_reg32(priv, IPW_EVENT_REG);
956 if (priv->assoc_network->mode == IEEE_A) {
957 led |= priv->led_ofdm_on;
958 led &= priv->led_association_off;
959 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
960 } else if (priv->assoc_network->mode == IEEE_G) {
961 led |= priv->led_ofdm_on;
962 led |= priv->led_association_on;
963 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
964 } else {
965 led &= priv->led_ofdm_off;
966 led |= priv->led_association_on;
967 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
970 led = ipw_register_toggle(led);
972 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
973 ipw_write_reg32(priv, IPW_EVENT_REG, led);
975 spin_unlock_irqrestore(&priv->lock, flags);
978 static void ipw_led_band_off(struct ipw_priv *priv)
980 unsigned long flags;
981 u32 led;
983 /* Only nic type 1 supports mode LEDs */
984 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
985 return;
987 spin_lock_irqsave(&priv->lock, flags);
989 led = ipw_read_reg32(priv, IPW_EVENT_REG);
990 led &= priv->led_ofdm_off;
991 led &= priv->led_association_off;
993 led = ipw_register_toggle(led);
995 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
996 ipw_write_reg32(priv, IPW_EVENT_REG, led);
998 spin_unlock_irqrestore(&priv->lock, flags);
1001 static void ipw_led_radio_on(struct ipw_priv *priv)
1003 ipw_led_link_on(priv);
1006 static void ipw_led_radio_off(struct ipw_priv *priv)
1008 ipw_led_activity_off(priv);
1009 ipw_led_link_off(priv);
1012 static void ipw_led_link_up(struct ipw_priv *priv)
1014 /* Set the Link Led on for all nic types */
1015 ipw_led_link_on(priv);
1018 static void ipw_led_link_down(struct ipw_priv *priv)
1020 ipw_led_activity_off(priv);
1021 ipw_led_link_off(priv);
1023 if (priv->status & STATUS_RF_KILL_MASK)
1024 ipw_led_radio_off(priv);
1027 static void ipw_led_init(struct ipw_priv *priv)
1029 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1031 /* Set the default PINs for the link and activity leds */
1032 priv->led_activity_on = IPW_ACTIVITY_LED;
1033 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1035 priv->led_association_on = IPW_ASSOCIATED_LED;
1036 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1038 /* Set the default PINs for the OFDM leds */
1039 priv->led_ofdm_on = IPW_OFDM_LED;
1040 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1042 switch (priv->nic_type) {
1043 case EEPROM_NIC_TYPE_1:
1044 /* In this NIC type, the LEDs are reversed.... */
1045 priv->led_activity_on = IPW_ASSOCIATED_LED;
1046 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1047 priv->led_association_on = IPW_ACTIVITY_LED;
1048 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1050 if (!(priv->config & CFG_NO_LED))
1051 ipw_led_band_on(priv);
1053 /* And we don't blink link LEDs for this nic, so
1054 * just return here */
1055 return;
1057 case EEPROM_NIC_TYPE_3:
1058 case EEPROM_NIC_TYPE_2:
1059 case EEPROM_NIC_TYPE_4:
1060 case EEPROM_NIC_TYPE_0:
1061 break;
1063 default:
1064 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1065 priv->nic_type);
1066 priv->nic_type = EEPROM_NIC_TYPE_0;
1067 break;
1070 if (!(priv->config & CFG_NO_LED)) {
1071 if (priv->status & STATUS_ASSOCIATED)
1072 ipw_led_link_on(priv);
1073 else
1074 ipw_led_link_off(priv);
1078 static void ipw_led_shutdown(struct ipw_priv *priv)
1080 ipw_led_activity_off(priv);
1081 ipw_led_link_off(priv);
1082 ipw_led_band_off(priv);
1083 cancel_delayed_work(&priv->led_link_on);
1084 cancel_delayed_work(&priv->led_link_off);
1085 cancel_delayed_work(&priv->led_act_off);
1089 * The following adds a new attribute to the sysfs representation
1090 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1091 * used for controlling the debug level.
1093 * See the level definitions in ipw for details.
1095 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1097 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1100 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1101 size_t count)
1103 char *p = (char *)buf;
1104 u32 val;
1106 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1107 p++;
1108 if (p[0] == 'x' || p[0] == 'X')
1109 p++;
1110 val = simple_strtoul(p, &p, 16);
1111 } else
1112 val = simple_strtoul(p, &p, 10);
1113 if (p == buf)
1114 printk(KERN_INFO DRV_NAME
1115 ": %s is not in hex or decimal form.\n", buf);
1116 else
1117 ipw_debug_level = val;
1119 return strnlen(buf, count);
1122 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1123 show_debug_level, store_debug_level);
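/* Example (directory name depends on how the driver registers itself; the
 * comment above assumes /sys/bus/pci/drivers/ipw/):
 *
 *   echo 0x8 > /sys/bus/pci/drivers/ipw/debug_level
 *
 * store_debug_level() accepts a hex value with a leading x/0x prefix, or a
 * plain decimal value. */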
1125 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1127 /* length = 1st dword in log */
1128 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1131 static void ipw_capture_event_log(struct ipw_priv *priv,
1132 u32 log_len, struct ipw_event *log)
1134 u32 base;
1136 if (log_len) {
1137 base = ipw_read32(priv, IPW_EVENT_LOG);
1138 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1139 (u8 *) log, sizeof(*log) * log_len);
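/* ipw_alloc_error_log() below captures both logs into a single allocation:
 * the ipw_fw_error header is followed by elem_len error elements and then
 * log_len event records, with error->elem and error->log pointing into that
 * payload. */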
1143 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1145 struct ipw_fw_error *error;
1146 u32 log_len = ipw_get_event_log_len(priv);
1147 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1148 u32 elem_len = ipw_read_reg32(priv, base);
1150 error = kmalloc(sizeof(*error) +
1151 sizeof(*error->elem) * elem_len +
1152 sizeof(*error->log) * log_len, GFP_ATOMIC);
1153 if (!error) {
1154 IPW_ERROR("Memory allocation for firmware error log "
1155 "failed.\n");
1156 return NULL;
1158 error->jiffies = jiffies;
1159 error->status = priv->status;
1160 error->config = priv->config;
1161 error->elem_len = elem_len;
1162 error->log_len = log_len;
1163 error->elem = (struct ipw_error_elem *)error->payload;
1164 error->log = (struct ipw_event *)(error->elem + elem_len);
1166 ipw_capture_event_log(priv, log_len, error->log);
1168 if (elem_len)
1169 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1170 sizeof(*error->elem) * elem_len);
1172 return error;
1175 static void ipw_free_error_log(struct ipw_fw_error *error)
1177 if (error)
1178 kfree(error);
1181 static ssize_t show_event_log(struct device *d,
1182 struct device_attribute *attr, char *buf)
1184 struct ipw_priv *priv = dev_get_drvdata(d);
1185 u32 log_len = ipw_get_event_log_len(priv);
1186 struct ipw_event log[log_len];
1187 u32 len = 0, i;
1189 ipw_capture_event_log(priv, log_len, log);
1191 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1192 for (i = 0; i < log_len; i++)
1193 len += snprintf(buf + len, PAGE_SIZE - len,
1194 "\n%08X%08X%08X",
1195 log[i].time, log[i].event, log[i].data);
1196 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1197 return len;
1200 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1202 static ssize_t show_error(struct device *d,
1203 struct device_attribute *attr, char *buf)
1205 struct ipw_priv *priv = dev_get_drvdata(d);
1206 u32 len = 0, i;
1207 if (!priv->error)
1208 return 0;
1209 len += snprintf(buf + len, PAGE_SIZE - len,
1210 "%08lX%08X%08X%08X",
1211 priv->error->jiffies,
1212 priv->error->status,
1213 priv->error->config, priv->error->elem_len);
1214 for (i = 0; i < priv->error->elem_len; i++)
1215 len += snprintf(buf + len, PAGE_SIZE - len,
1216 "\n%08X%08X%08X%08X%08X%08X%08X",
1217 priv->error->elem[i].time,
1218 priv->error->elem[i].desc,
1219 priv->error->elem[i].blink1,
1220 priv->error->elem[i].blink2,
1221 priv->error->elem[i].link1,
1222 priv->error->elem[i].link2,
1223 priv->error->elem[i].data);
1225 len += snprintf(buf + len, PAGE_SIZE - len,
1226 "\n%08X", priv->error->log_len);
1227 for (i = 0; i < priv->error->log_len; i++)
1228 len += snprintf(buf + len, PAGE_SIZE - len,
1229 "\n%08X%08X%08X",
1230 priv->error->log[i].time,
1231 priv->error->log[i].event,
1232 priv->error->log[i].data);
1233 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1234 return len;
1237 static ssize_t clear_error(struct device *d,
1238 struct device_attribute *attr,
1239 const char *buf, size_t count)
1241 struct ipw_priv *priv = dev_get_drvdata(d);
1242 if (priv->error) {
1243 ipw_free_error_log(priv->error);
1244 priv->error = NULL;
1246 return count;
1249 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1251 static ssize_t show_cmd_log(struct device *d,
1252 struct device_attribute *attr, char *buf)
1254 struct ipw_priv *priv = dev_get_drvdata(d);
1255 u32 len = 0, i;
1256 if (!priv->cmdlog)
1257 return 0;
1258 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1259 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1260 i = (i + 1) % priv->cmdlog_len) {
1261 len +=
1262 snprintf(buf + len, PAGE_SIZE - len,
1263 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1264 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1265 priv->cmdlog[i].cmd.len);
1266 len +=
1267 snprintk_buf(buf + len, PAGE_SIZE - len,
1268 (u8 *) priv->cmdlog[i].cmd.param,
1269 priv->cmdlog[i].cmd.len);
1270 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1272 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1273 return len;
1276 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
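/* The command log is a ring buffer: __ipw_send_cmd() records each host
 * command at cmdlog_pos and then advances it, so show_cmd_log() above starts
 * dumping at (cmdlog_pos + 1) % cmdlog_len, i.e. from the oldest entry. */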
1278 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1279 char *buf)
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1285 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1286 const char *buf, size_t count)
1288 struct ipw_priv *priv = dev_get_drvdata(d);
1289 #ifdef CONFIG_IPW2200_DEBUG
1290 struct net_device *dev = priv->net_dev;
1291 #endif
1292 char buffer[] = "00000000";
1293 unsigned long len =
1294 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1295 unsigned long val;
1296 char *p = buffer;
1298 IPW_DEBUG_INFO("enter\n");
1300 strncpy(buffer, buf, len);
1301 buffer[len] = 0;
1303 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1304 p++;
1305 if (p[0] == 'x' || p[0] == 'X')
1306 p++;
1307 val = simple_strtoul(p, &p, 16);
1308 } else
1309 val = simple_strtoul(p, &p, 10);
1310 if (p == buffer) {
1311 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1312 } else {
1313 priv->ieee->scan_age = val;
1314 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1317 IPW_DEBUG_INFO("exit\n");
1318 return len;
1321 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1323 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1324 char *buf)
1326 struct ipw_priv *priv = dev_get_drvdata(d);
1327 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1330 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1331 const char *buf, size_t count)
1333 struct ipw_priv *priv = dev_get_drvdata(d);
1335 IPW_DEBUG_INFO("enter\n");
1337 if (count == 0)
1338 return 0;
1340 if (*buf == 0) {
1341 IPW_DEBUG_LED("Disabling LED control.\n");
1342 priv->config |= CFG_NO_LED;
1343 ipw_led_shutdown(priv);
1344 } else {
1345 IPW_DEBUG_LED("Enabling LED control.\n");
1346 priv->config &= ~CFG_NO_LED;
1347 ipw_led_init(priv);
1350 IPW_DEBUG_INFO("exit\n");
1351 return count;
1354 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1356 static ssize_t show_status(struct device *d,
1357 struct device_attribute *attr, char *buf)
1359 struct ipw_priv *p = d->driver_data;
1360 return sprintf(buf, "0x%08x\n", (int)p->status);
1363 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1365 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1366 char *buf)
1368 struct ipw_priv *p = d->driver_data;
1369 return sprintf(buf, "0x%08x\n", (int)p->config);
1372 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1374 static ssize_t show_nic_type(struct device *d,
1375 struct device_attribute *attr, char *buf)
1377 struct ipw_priv *priv = d->driver_data;
1378 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1381 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1383 static ssize_t show_ucode_version(struct device *d,
1384 struct device_attribute *attr, char *buf)
1386 u32 len = sizeof(u32), tmp = 0;
1387 struct ipw_priv *p = d->driver_data;
1389 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1390 return 0;
1392 return sprintf(buf, "0x%08x\n", tmp);
1395 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1397 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1398 char *buf)
1400 u32 len = sizeof(u32), tmp = 0;
1401 struct ipw_priv *p = d->driver_data;
1403 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1404 return 0;
1406 return sprintf(buf, "0x%08x\n", tmp);
1409 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1412 * Add a device attribute to view/control the delay between eeprom
1413 * operations.
1415 static ssize_t show_eeprom_delay(struct device *d,
1416 struct device_attribute *attr, char *buf)
1418 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1419 return sprintf(buf, "%i\n", n);
1421 static ssize_t store_eeprom_delay(struct device *d,
1422 struct device_attribute *attr,
1423 const char *buf, size_t count)
1425 struct ipw_priv *p = d->driver_data;
1426 sscanf(buf, "%i", &p->eeprom_delay);
1427 return strnlen(buf, count);
1430 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1431 show_eeprom_delay, store_eeprom_delay);
1433 static ssize_t show_command_event_reg(struct device *d,
1434 struct device_attribute *attr, char *buf)
1436 u32 reg = 0;
1437 struct ipw_priv *p = d->driver_data;
1439 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1440 return sprintf(buf, "0x%08x\n", reg);
1442 static ssize_t store_command_event_reg(struct device *d,
1443 struct device_attribute *attr,
1444 const char *buf, size_t count)
1446 u32 reg;
1447 struct ipw_priv *p = d->driver_data;
1449 sscanf(buf, "%x", &reg);
1450 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1451 return strnlen(buf, count);
1454 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1455 show_command_event_reg, store_command_event_reg);
1457 static ssize_t show_mem_gpio_reg(struct device *d,
1458 struct device_attribute *attr, char *buf)
1460 u32 reg = 0;
1461 struct ipw_priv *p = d->driver_data;
1463 reg = ipw_read_reg32(p, 0x301100);
1464 return sprintf(buf, "0x%08x\n", reg);
1466 static ssize_t store_mem_gpio_reg(struct device *d,
1467 struct device_attribute *attr,
1468 const char *buf, size_t count)
1470 u32 reg;
1471 struct ipw_priv *p = d->driver_data;
1473 sscanf(buf, "%x", &reg);
1474 ipw_write_reg32(p, 0x301100, reg);
1475 return strnlen(buf, count);
1478 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1479 show_mem_gpio_reg, store_mem_gpio_reg);
1481 static ssize_t show_indirect_dword(struct device *d,
1482 struct device_attribute *attr, char *buf)
1484 u32 reg = 0;
1485 struct ipw_priv *priv = d->driver_data;
1487 if (priv->status & STATUS_INDIRECT_DWORD)
1488 reg = ipw_read_reg32(priv, priv->indirect_dword);
1489 else
1490 reg = 0;
1492 return sprintf(buf, "0x%08x\n", reg);
1494 static ssize_t store_indirect_dword(struct device *d,
1495 struct device_attribute *attr,
1496 const char *buf, size_t count)
1498 struct ipw_priv *priv = d->driver_data;
1500 sscanf(buf, "%x", &priv->indirect_dword);
1501 priv->status |= STATUS_INDIRECT_DWORD;
1502 return strnlen(buf, count);
1505 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1506 show_indirect_dword, store_indirect_dword);
1508 static ssize_t show_indirect_byte(struct device *d,
1509 struct device_attribute *attr, char *buf)
1511 u8 reg = 0;
1512 struct ipw_priv *priv = d->driver_data;
1514 if (priv->status & STATUS_INDIRECT_BYTE)
1515 reg = ipw_read_reg8(priv, priv->indirect_byte);
1516 else
1517 reg = 0;
1519 return sprintf(buf, "0x%02x\n", reg);
1521 static ssize_t store_indirect_byte(struct device *d,
1522 struct device_attribute *attr,
1523 const char *buf, size_t count)
1525 struct ipw_priv *priv = d->driver_data;
1527 sscanf(buf, "%x", &priv->indirect_byte);
1528 priv->status |= STATUS_INDIRECT_BYTE;
1529 return strnlen(buf, count);
1532 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1533 show_indirect_byte, store_indirect_byte);
1535 static ssize_t show_direct_dword(struct device *d,
1536 struct device_attribute *attr, char *buf)
1538 u32 reg = 0;
1539 struct ipw_priv *priv = d->driver_data;
1541 if (priv->status & STATUS_DIRECT_DWORD)
1542 reg = ipw_read32(priv, priv->direct_dword);
1543 else
1544 reg = 0;
1546 return sprintf(buf, "0x%08x\n", reg);
1548 static ssize_t store_direct_dword(struct device *d,
1549 struct device_attribute *attr,
1550 const char *buf, size_t count)
1552 struct ipw_priv *priv = d->driver_data;
1554 sscanf(buf, "%x", &priv->direct_dword);
1555 priv->status |= STATUS_DIRECT_DWORD;
1556 return strnlen(buf, count);
1559 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1560 show_direct_dword, store_direct_dword);
1562 static int rf_kill_active(struct ipw_priv *priv)
1564 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1565 priv->status |= STATUS_RF_KILL_HW;
1566 else
1567 priv->status &= ~STATUS_RF_KILL_HW;
1569 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1572 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1573 char *buf)
1575 /* 0 - RF kill not enabled
1576 1 - SW based RF kill active (sysfs)
1577 2 - HW based RF kill active
1578 3 - Both HW and SW based RF kill active */
1579 struct ipw_priv *priv = d->driver_data;
1580 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1581 (rf_kill_active(priv) ? 0x2 : 0x0);
1582 return sprintf(buf, "%i\n", val);
1585 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1587 if ((disable_radio ? 1 : 0) ==
1588 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1589 return 0;
1591 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1592 disable_radio ? "OFF" : "ON");
1594 if (disable_radio) {
1595 priv->status |= STATUS_RF_KILL_SW;
1597 if (priv->workqueue)
1598 cancel_delayed_work(&priv->request_scan);
1599 queue_work(priv->workqueue, &priv->down);
1600 } else {
1601 priv->status &= ~STATUS_RF_KILL_SW;
1602 if (rf_kill_active(priv)) {
1603 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1604 "disabled by HW switch\n");
1605 /* Make sure the RF_KILL check timer is running */
1606 cancel_delayed_work(&priv->rf_kill);
1607 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1608 2 * HZ);
1609 } else
1610 queue_work(priv->workqueue, &priv->up);
1613 return 1;
1616 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1617 const char *buf, size_t count)
1619 struct ipw_priv *priv = d->driver_data;
1621 ipw_radio_kill_sw(priv, buf[0] == '1');
1623 return count;
1626 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
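/* Usage sketch: writing '1' to rf_kill asserts the software RF kill and
 * schedules the 'down' work; any other value clears it, but the radio is
 * only brought back up if the hardware kill switch is not active. */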
1628 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1629 char *buf)
1631 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1632 int pos = 0, len = 0;
1633 if (priv->config & CFG_SPEED_SCAN) {
1634 while (priv->speed_scan[pos] != 0)
1635 len += sprintf(&buf[len], "%d ",
1636 priv->speed_scan[pos++]);
1637 return len + sprintf(&buf[len], "\n");
1640 return sprintf(buf, "0\n");
1643 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1644 const char *buf, size_t count)
1646 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1647 int channel, pos = 0;
1648 const char *p = buf;
1650 /* list of space separated channels to scan, optionally ending with 0 */
1651 while ((channel = simple_strtol(p, NULL, 0))) {
1652 if (pos == MAX_SPEED_SCAN - 1) {
1653 priv->speed_scan[pos] = 0;
1654 break;
1657 if (ipw_is_valid_channel(priv->ieee, channel))
1658 priv->speed_scan[pos++] = channel;
1659 else
1660 IPW_WARNING("Skipping invalid channel request: %d\n",
1661 channel);
1662 p = strchr(p, ' ');
1663 if (!p)
1664 break;
1665 while (*p == ' ' || *p == '\t')
1666 p++;
1669 if (pos == 0)
1670 priv->config &= ~CFG_SPEED_SCAN;
1671 else {
1672 priv->speed_scan_pos = 0;
1673 priv->config |= CFG_SPEED_SCAN;
1676 return count;
1679 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1680 store_speed_scan);
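/* Usage sketch: writing "1 6 11" to speed_scan restricts scanning to the
 * listed (valid) channels and sets CFG_SPEED_SCAN; writing a single 0
 * clears the list and disables speed scan again. */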
1682 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1683 char *buf)
1685 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1686 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1689 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1690 const char *buf, size_t count)
1692 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1693 if (buf[0] == '1')
1694 priv->config |= CFG_NET_STATS;
1695 else
1696 priv->config &= ~CFG_NET_STATS;
1698 return count;
1701 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1702 show_net_stats, store_net_stats);
1704 static void notify_wx_assoc_event(struct ipw_priv *priv)
1706 union iwreq_data wrqu;
1707 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1708 if (priv->status & STATUS_ASSOCIATED)
1709 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1710 else
1711 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1712 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
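/* Interrupt bottom half: the tasklet below re-reads INTA, merges in any
 * cached bits (priv->isr_inta), services each source in turn (RX, the four
 * TX queues, command completion, RF kill, fatal firmware errors, ...) and
 * finally re-enables interrupts. */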
1715 static void ipw_irq_tasklet(struct ipw_priv *priv)
1717 u32 inta, inta_mask, handled = 0;
1718 unsigned long flags;
1719 int rc = 0;
1721 spin_lock_irqsave(&priv->lock, flags);
1723 inta = ipw_read32(priv, IPW_INTA_RW);
1724 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1725 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1727 /* Add any cached INTA values that need to be handled */
1728 inta |= priv->isr_inta;
1730 /* handle all the justifications for the interrupt */
1731 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1732 ipw_rx(priv);
1733 handled |= IPW_INTA_BIT_RX_TRANSFER;
1736 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1737 IPW_DEBUG_HC("Command completed.\n");
1738 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1739 priv->status &= ~STATUS_HCMD_ACTIVE;
1740 wake_up_interruptible(&priv->wait_command_queue);
1741 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1744 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1745 IPW_DEBUG_TX("TX_QUEUE_1\n");
1746 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1747 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1750 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1751 IPW_DEBUG_TX("TX_QUEUE_2\n");
1752 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1753 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1756 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1757 IPW_DEBUG_TX("TX_QUEUE_3\n");
1758 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1759 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1762 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1763 IPW_DEBUG_TX("TX_QUEUE_4\n");
1764 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1765 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1768 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1769 IPW_WARNING("STATUS_CHANGE\n");
1770 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1773 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1774 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1775 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1778 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1779 IPW_WARNING("HOST_CMD_DONE\n");
1780 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1783 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1784 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1785 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1788 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1789 IPW_WARNING("PHY_OFF_DONE\n");
1790 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1793 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1794 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1795 priv->status |= STATUS_RF_KILL_HW;
1796 wake_up_interruptible(&priv->wait_command_queue);
1797 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1798 cancel_delayed_work(&priv->request_scan);
1799 schedule_work(&priv->link_down);
1800 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1801 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1804 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1805 IPW_ERROR("Firmware error detected. Restarting.\n");
1806 if (priv->error) {
1807 IPW_ERROR("Sysfs 'error' log already exists.\n");
1808 #ifdef CONFIG_IPW2200_DEBUG
1809 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1810 struct ipw_fw_error *error =
1811 ipw_alloc_error_log(priv);
1812 ipw_dump_error_log(priv, error);
1813 if (error)
1814 ipw_free_error_log(error);
1816 #endif
1817 } else {
1818 priv->error = ipw_alloc_error_log(priv);
1819 if (priv->error)
1820 IPW_ERROR("Sysfs 'error' log captured.\n");
1821 else
1822 IPW_ERROR("Error allocating sysfs 'error' "
1823 "log.\n");
1824 #ifdef CONFIG_IPW2200_DEBUG
1825 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1826 ipw_dump_error_log(priv, priv->error);
1827 #endif
1830 /* XXX: If hardware encryption is for WPA/WPA2,
1831 * we have to notify the supplicant. */
1832 if (priv->ieee->sec.encrypt) {
1833 priv->status &= ~STATUS_ASSOCIATED;
1834 notify_wx_assoc_event(priv);
1837 /* Keep the restart process from trying to send host
1838 * commands by clearing the INIT status bit */
1839 priv->status &= ~STATUS_INIT;
1841 /* Cancel currently queued command. */
1842 priv->status &= ~STATUS_HCMD_ACTIVE;
1843 wake_up_interruptible(&priv->wait_command_queue);
1845 queue_work(priv->workqueue, &priv->adapter_restart);
1846 handled |= IPW_INTA_BIT_FATAL_ERROR;
1849 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1850 IPW_ERROR("Parity error\n");
1851 handled |= IPW_INTA_BIT_PARITY_ERROR;
1854 if (handled != inta) {
1855 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1858 /* enable all interrupts */
1859 ipw_enable_interrupts(priv);
1861 spin_unlock_irqrestore(&priv->lock, flags);
1864 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
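/* IPW_CMD(FOO) expands to 'case IPW_CMD_FOO: return "FOO";', letting the
 * switch in get_cmd_string() below map a command id to its name for debug
 * and error messages. */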
1865 static char *get_cmd_string(u8 cmd)
1867 switch (cmd) {
1868 IPW_CMD(HOST_COMPLETE);
1869 IPW_CMD(POWER_DOWN);
1870 IPW_CMD(SYSTEM_CONFIG);
1871 IPW_CMD(MULTICAST_ADDRESS);
1872 IPW_CMD(SSID);
1873 IPW_CMD(ADAPTER_ADDRESS);
1874 IPW_CMD(PORT_TYPE);
1875 IPW_CMD(RTS_THRESHOLD);
1876 IPW_CMD(FRAG_THRESHOLD);
1877 IPW_CMD(POWER_MODE);
1878 IPW_CMD(WEP_KEY);
1879 IPW_CMD(TGI_TX_KEY);
1880 IPW_CMD(SCAN_REQUEST);
1881 IPW_CMD(SCAN_REQUEST_EXT);
1882 IPW_CMD(ASSOCIATE);
1883 IPW_CMD(SUPPORTED_RATES);
1884 IPW_CMD(SCAN_ABORT);
1885 IPW_CMD(TX_FLUSH);
1886 IPW_CMD(QOS_PARAMETERS);
1887 IPW_CMD(DINO_CONFIG);
1888 IPW_CMD(RSN_CAPABILITIES);
1889 IPW_CMD(RX_KEY);
1890 IPW_CMD(CARD_DISABLE);
1891 IPW_CMD(SEED_NUMBER);
1892 IPW_CMD(TX_POWER);
1893 IPW_CMD(COUNTRY_INFO);
1894 IPW_CMD(AIRONET_INFO);
1895 IPW_CMD(AP_TX_POWER);
1896 IPW_CMD(CCKM_INFO);
1897 IPW_CMD(CCX_VER_INFO);
1898 IPW_CMD(SET_CALIBRATION);
1899 IPW_CMD(SENSITIVITY_CALIB);
1900 IPW_CMD(RETRY_LIMIT);
1901 IPW_CMD(IPW_PRE_POWER_DOWN);
1902 IPW_CMD(VAP_BEACON_TEMPLATE);
1903 IPW_CMD(VAP_DTIM_PERIOD);
1904 IPW_CMD(EXT_SUPPORTED_RATES);
1905 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1906 IPW_CMD(VAP_QUIET_INTERVALS);
1907 IPW_CMD(VAP_CHANNEL_SWITCH);
1908 IPW_CMD(VAP_MANDATORY_CHANNELS);
1909 IPW_CMD(VAP_CELL_PWR_LIMIT);
1910 IPW_CMD(VAP_CF_PARAM_SET);
1911 IPW_CMD(VAP_SET_BEACONING_STATE);
1912 IPW_CMD(MEASUREMENT);
1913 IPW_CMD(POWER_CAPABILITY);
1914 IPW_CMD(SUPPORTED_CHANNELS);
1915 IPW_CMD(TPC_REPORT);
1916 IPW_CMD(WME_INFO);
1917 IPW_CMD(PRODUCTION_COMMAND);
1918 default:
1919 return "UNKNOWN";
1923 #define HOST_COMPLETE_TIMEOUT HZ
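/* One second's worth of jiffies: how long __ipw_send_cmd() below waits for
 * STATUS_HCMD_ACTIVE to clear before declaring a command timeout. */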
1925 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1927 int rc = 0;
1928 unsigned long flags;
1930 spin_lock_irqsave(&priv->lock, flags);
1931 if (priv->status & STATUS_HCMD_ACTIVE) {
1932 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1933 get_cmd_string(cmd->cmd));
1934 spin_unlock_irqrestore(&priv->lock, flags);
1935 return -EAGAIN;
1938 priv->status |= STATUS_HCMD_ACTIVE;
1940 if (priv->cmdlog) {
1941 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1942 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1943 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1944 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1945 cmd->len);
1946 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1949 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1950 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1951 priv->status);
1953 #ifndef DEBUG_CMD_WEP_KEY
1954 if (cmd->cmd == IPW_CMD_WEP_KEY)
1955                 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
1956 else
1957 #endif
1958 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1960 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
1961 if (rc) {
1962 priv->status &= ~STATUS_HCMD_ACTIVE;
1963 IPW_ERROR("Failed to send %s: Reason %d\n",
1964 get_cmd_string(cmd->cmd), rc);
1965 spin_unlock_irqrestore(&priv->lock, flags);
1966 goto exit;
1968 spin_unlock_irqrestore(&priv->lock, flags);
1970         rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1971                                               !(priv->status & STATUS_HCMD_ACTIVE),
1973                                               HOST_COMPLETE_TIMEOUT);
1974 if (rc == 0) {
1975 spin_lock_irqsave(&priv->lock, flags);
1976 if (priv->status & STATUS_HCMD_ACTIVE) {
1977 IPW_ERROR("Failed to send %s: Command timed out.\n",
1978 get_cmd_string(cmd->cmd));
1979 priv->status &= ~STATUS_HCMD_ACTIVE;
1980 spin_unlock_irqrestore(&priv->lock, flags);
1981 rc = -EIO;
1982 goto exit;
1984 spin_unlock_irqrestore(&priv->lock, flags);
1985 } else
1986 rc = 0;
1988 if (priv->status & STATUS_RF_KILL_HW) {
1989 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1990 get_cmd_string(cmd->cmd));
1991 rc = -EIO;
1992 goto exit;
1995 exit:
1996 if (priv->cmdlog) {
1997 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
1998 priv->cmdlog_pos %= priv->cmdlog_len;
2000 return rc;
2003 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2005 struct host_cmd cmd = {
2006 .cmd = command,
2009 return __ipw_send_cmd(priv, &cmd);
2012 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2013 void *data)
2015 struct host_cmd cmd = {
2016 .cmd = command,
2017 .len = len,
2018 .param = data,
2021 return __ipw_send_cmd(priv, &cmd);
2024 static int ipw_send_host_complete(struct ipw_priv *priv)
2026 if (!priv) {
2027 IPW_ERROR("Invalid args\n");
2028 return -1;
2031 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2034 static int ipw_send_system_config(struct ipw_priv *priv,
2035 struct ipw_sys_config *config)
2037 if (!priv || !config) {
2038 IPW_ERROR("Invalid args\n");
2039 return -1;
2042 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
2043 config);
2046 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2048 if (!priv || !ssid) {
2049 IPW_ERROR("Invalid args\n");
2050 return -1;
2053 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2054 ssid);
2057 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2059 if (!priv || !mac) {
2060 IPW_ERROR("Invalid args\n");
2061 return -1;
2064 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2065 priv->net_dev->name, MAC_ARG(mac));
2067 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2071 * NOTE: This must be executed from our workqueue as it results in udelay
2072  * being called, which may corrupt the keyboard if executed on the default
2073 * workqueue
2075 static void ipw_adapter_restart(void *adapter)
2077 struct ipw_priv *priv = adapter;
2079 if (priv->status & STATUS_RF_KILL_MASK)
2080 return;
2082 ipw_down(priv);
2084 if (priv->assoc_network &&
2085 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2086 ipw_remove_current_network(priv);
2088 if (ipw_up(priv)) {
2089 IPW_ERROR("Failed to up device\n");
2090 return;
2094 static void ipw_bg_adapter_restart(void *data)
2096 struct ipw_priv *priv = data;
2097 mutex_lock(&priv->mutex);
2098 ipw_adapter_restart(data);
2099 mutex_unlock(&priv->mutex);
2102 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2104 static void ipw_scan_check(void *data)
2106 struct ipw_priv *priv = data;
2107 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2108 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2109 "adapter after (%dms).\n",
2110 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2111 queue_work(priv->workqueue, &priv->adapter_restart);
2115 static void ipw_bg_scan_check(void *data)
2117 struct ipw_priv *priv = data;
2118 mutex_lock(&priv->mutex);
2119 ipw_scan_check(data);
2120 mutex_unlock(&priv->mutex);
2123 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2124 struct ipw_scan_request_ext *request)
2126 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2127 sizeof(*request), request);
2130 static int ipw_send_scan_abort(struct ipw_priv *priv)
2132 if (!priv) {
2133 IPW_ERROR("Invalid args\n");
2134 return -1;
2137 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2140 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2142 struct ipw_sensitivity_calib calib = {
2143 .beacon_rssi_raw = sens,
2146 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2147 &calib);
2150 static int ipw_send_associate(struct ipw_priv *priv,
2151 struct ipw_associate *associate)
2153 struct ipw_associate tmp_associate;
2155 if (!priv || !associate) {
2156 IPW_ERROR("Invalid args\n");
2157 return -1;
2160 memcpy(&tmp_associate, associate, sizeof(*associate));
2161 tmp_associate.policy_support =
2162 cpu_to_le16(tmp_associate.policy_support);
2163 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2164 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2165 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2166 tmp_associate.listen_interval =
2167 cpu_to_le16(tmp_associate.listen_interval);
2168 tmp_associate.beacon_interval =
2169 cpu_to_le16(tmp_associate.beacon_interval);
2170 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2172 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2173 &tmp_associate);
2176 static int ipw_send_supported_rates(struct ipw_priv *priv,
2177 struct ipw_supported_rates *rates)
2179 if (!priv || !rates) {
2180 IPW_ERROR("Invalid args\n");
2181 return -1;
2184 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2185 rates);
2188 static int ipw_set_random_seed(struct ipw_priv *priv)
2190 u32 val;
2192 if (!priv) {
2193 IPW_ERROR("Invalid args\n");
2194 return -1;
2197 get_random_bytes(&val, sizeof(val));
2199 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2202 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2204 if (!priv) {
2205 IPW_ERROR("Invalid args\n");
2206 return -1;
2209 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2210 &phy_off);
2213 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2215 if (!priv || !power) {
2216 IPW_ERROR("Invalid args\n");
2217 return -1;
2220 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2223 static int ipw_set_tx_power(struct ipw_priv *priv)
2225 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
2226 struct ipw_tx_power tx_power;
2227 s8 max_power;
2228 int i;
2230 memset(&tx_power, 0, sizeof(tx_power));
2232 /* configure device for 'G' band */
2233 tx_power.ieee_mode = IPW_G_MODE;
2234 tx_power.num_channels = geo->bg_channels;
2235 for (i = 0; i < geo->bg_channels; i++) {
2236 max_power = geo->bg[i].max_power;
2237 tx_power.channels_tx_power[i].channel_number =
2238 geo->bg[i].channel;
2239 tx_power.channels_tx_power[i].tx_power = max_power ?
2240 min(max_power, priv->tx_power) : priv->tx_power;
2242 if (ipw_send_tx_power(priv, &tx_power))
2243 return -EIO;
2245 /* configure device to also handle 'B' band */
2246 tx_power.ieee_mode = IPW_B_MODE;
2247 if (ipw_send_tx_power(priv, &tx_power))
2248 return -EIO;
2250 /* configure device to also handle 'A' band */
2251 if (priv->ieee->abg_true) {
2252 tx_power.ieee_mode = IPW_A_MODE;
2253 tx_power.num_channels = geo->a_channels;
2254 for (i = 0; i < tx_power.num_channels; i++) {
2255 max_power = geo->a[i].max_power;
2256 tx_power.channels_tx_power[i].channel_number =
2257 geo->a[i].channel;
2258 tx_power.channels_tx_power[i].tx_power = max_power ?
2259 min(max_power, priv->tx_power) : priv->tx_power;
2261 if (ipw_send_tx_power(priv, &tx_power))
2262 return -EIO;
2264 return 0;
2267 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2269 struct ipw_rts_threshold rts_threshold = {
2270 .rts_threshold = rts,
2273 if (!priv) {
2274 IPW_ERROR("Invalid args\n");
2275 return -1;
2278 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2279 sizeof(rts_threshold), &rts_threshold);
2282 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2284 struct ipw_frag_threshold frag_threshold = {
2285 .frag_threshold = frag,
2288 if (!priv) {
2289 IPW_ERROR("Invalid args\n");
2290 return -1;
2293 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2294 sizeof(frag_threshold), &frag_threshold);
2297 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2299 u32 param;
2301 if (!priv) {
2302 IPW_ERROR("Invalid args\n");
2303 return -1;
2306 /* If on battery, set to 3, if AC set to CAM, else user
2307 * level */
2308 switch (mode) {
2309 case IPW_POWER_BATTERY:
2310 param = IPW_POWER_INDEX_3;
2311 break;
2312 case IPW_POWER_AC:
2313 param = IPW_POWER_MODE_CAM;
2314 break;
2315 default:
2316 param = mode;
2317 break;
2320 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2321 &param);
2324 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2326 struct ipw_retry_limit retry_limit = {
2327 .short_retry_limit = slimit,
2328 .long_retry_limit = llimit
2331 if (!priv) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2336 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2337 &retry_limit);
2341 * The IPW device contains a Microwire compatible EEPROM that stores
2342 * various data like the MAC address. Usually the firmware has exclusive
2343 * access to the eeprom, but during device initialization (before the
2344 * device driver has sent the HostComplete command to the firmware) the
2345 * device driver has read access to the EEPROM by way of indirect addressing
2346 * through a couple of memory mapped registers.
2348 * The following is a simplified implementation for pulling data out of the
2349  * eeprom, along with some helper functions to find information in
2350 * the per device private data's copy of the eeprom.
2352  * NOTE: To better understand how these functions work (i.e. what is a chip
2353  * select and why do we have to keep driving the eeprom clock?), read
2354 * just about any data sheet for a Microwire compatible EEPROM.
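 *
 * As a rough sketch, one READ transaction (as clocked out bit by bit by
 * eeprom_op() and eeprom_read_u16() below) looks like: assert chip select,
 * shift out a start bit of '1', the two opcode bits of EEPROM_CMD_READ and
 * 8 address bits MSB first, send one dummy bit, then clock in 16 data bits
 * from the DO line and finally de-assert chip select.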
2357 /* write a 32 bit value into the indirect accessor register */
2358 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2360 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2362 /* the eeprom requires some time to complete the operation */
2363 udelay(p->eeprom_delay);
2365 return;
2368 /* perform a chip select operation */
2369 static void eeprom_cs(struct ipw_priv *priv)
2371 eeprom_write_reg(priv, 0);
2372 eeprom_write_reg(priv, EEPROM_BIT_CS);
2373 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2374 eeprom_write_reg(priv, EEPROM_BIT_CS);
2377 /* de-assert / disable the chip select */
2378 static void eeprom_disable_cs(struct ipw_priv *priv)
2380 eeprom_write_reg(priv, EEPROM_BIT_CS);
2381 eeprom_write_reg(priv, 0);
2382 eeprom_write_reg(priv, EEPROM_BIT_SK);
2385 /* push a single bit down to the eeprom */
2386 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2388 int d = (bit ? EEPROM_BIT_DI : 0);
2389 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2390 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2393 /* push an opcode followed by an address down to the eeprom */
2394 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2396 int i;
2398 eeprom_cs(priv);
2399 eeprom_write_bit(priv, 1);
2400 eeprom_write_bit(priv, op & 2);
2401 eeprom_write_bit(priv, op & 1);
2402 for (i = 7; i >= 0; i--) {
2403 eeprom_write_bit(priv, addr & (1 << i));
2407 /* pull 16 bits off the eeprom, one bit at a time */
2408 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2410 int i;
2411 u16 r = 0;
2413 /* Send READ Opcode */
2414 eeprom_op(priv, EEPROM_CMD_READ, addr);
2416 /* Send dummy bit */
2417 eeprom_write_reg(priv, EEPROM_BIT_CS);
2419 /* Read the byte off the eeprom one bit at a time */
2420 for (i = 0; i < 16; i++) {
2421 u32 data = 0;
2422 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2423 eeprom_write_reg(priv, EEPROM_BIT_CS);
2424 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2425 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2428 /* Send another dummy bit */
2429 eeprom_write_reg(priv, 0);
2430 eeprom_disable_cs(priv);
2432 return r;
2435 /* helper function for pulling the mac address out of the private */
2436 /* data's copy of the eeprom data */
2437 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2439 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2443 * Either the device driver (i.e. the host) or the firmware can
2444 * load eeprom data into the designated region in SRAM. If neither
2445 * happens then the FW will shutdown with a fatal error.
2447 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2448  * bit in the designated region of shared SRAM needs to be non-zero.
2450 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2452 int i;
2453 u16 *eeprom = (u16 *) priv->eeprom;
2455 IPW_DEBUG_TRACE(">>\n");
2457 /* read entire contents of eeprom into private buffer */
2458 for (i = 0; i < 128; i++)
2459 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2462    If the data looks correct, then copy our private copy of it
2463    into SRAM. Otherwise let the firmware know to perform the load
2464    on its own.
2466 if (priv->eeprom[EEPROM_VERSION] != 0) {
2467 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2469 /* write the eeprom data to sram */
2470 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2471 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2473 /* Do not load eeprom data on fatal error or suspend */
2474 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2475 } else {
2476                 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2478 /* Load eeprom data on fatal error or suspend */
2479 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2482 IPW_DEBUG_TRACE("<<\n");
2485 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2487 count >>= 2;
2488 if (!count)
2489 return;
2490 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2491 while (count--)
2492 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2495 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2497 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2498 CB_NUMBER_OF_ELEMENTS_SMALL *
2499 sizeof(struct command_block));
2502 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2503 { /* start dma engine but no transfers yet */
2505 IPW_DEBUG_FW(">> : \n");
2507 /* Start the dma */
2508 ipw_fw_dma_reset_command_blocks(priv);
2510 /* Write CB base address */
2511 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2513 IPW_DEBUG_FW("<< : \n");
2514 return 0;
2517 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2519 u32 control = 0;
2521 IPW_DEBUG_FW(">> :\n");
2523 //set the Stop and Abort bit
2524 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2525 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2526 priv->sram_desc.last_cb_index = 0;
2528 IPW_DEBUG_FW("<< \n");
2531 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2532 struct command_block *cb)
2534 u32 address =
2535 IPW_SHARED_SRAM_DMA_CONTROL +
2536 (sizeof(struct command_block) * index);
2537 IPW_DEBUG_FW(">> :\n");
2539 ipw_write_indirect(priv, address, (u8 *) cb,
2540 (int)sizeof(struct command_block));
2542 IPW_DEBUG_FW("<< :\n");
2543 return 0;
2547 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2549 u32 control = 0;
2550 u32 index = 0;
2552 IPW_DEBUG_FW(">> :\n");
2554 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2555 ipw_fw_dma_write_command_block(priv, index,
2556 &priv->sram_desc.cb_list[index]);
2558 /* Enable the DMA in the CSR register */
2559 ipw_clear_bit(priv, IPW_RESET_REG,
2560 IPW_RESET_REG_MASTER_DISABLED |
2561 IPW_RESET_REG_STOP_MASTER);
2563 /* Set the Start bit. */
2564 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2565 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2567 IPW_DEBUG_FW("<< :\n");
2568 return 0;
2571 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2573 u32 address;
2574 u32 register_value = 0;
2575 u32 cb_fields_address = 0;
2577 IPW_DEBUG_FW(">> :\n");
2578 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2579 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2581         /* Read the DMA Control register */
2582 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2583 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2585 /* Print the CB values */
2586 cb_fields_address = address;
2587 register_value = ipw_read_reg32(priv, cb_fields_address);
2588 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2590 cb_fields_address += sizeof(u32);
2591 register_value = ipw_read_reg32(priv, cb_fields_address);
2592 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2594 cb_fields_address += sizeof(u32);
2595 register_value = ipw_read_reg32(priv, cb_fields_address);
2596 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2597 register_value);
2599 cb_fields_address += sizeof(u32);
2600 register_value = ipw_read_reg32(priv, cb_fields_address);
2601 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2603         IPW_DEBUG_FW("<< :\n");
2606 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2608 u32 current_cb_address = 0;
2609 u32 current_cb_index = 0;
2611         IPW_DEBUG_FW(">> :\n");
2612 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2614 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2615 sizeof(struct command_block);
2617 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2618 current_cb_index, current_cb_address);
2620         IPW_DEBUG_FW("<< :\n");
2621 return current_cb_index;
2625 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2626 u32 src_address,
2627 u32 dest_address,
2628 u32 length,
2629 int interrupt_enabled, int is_last)
2632 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2633 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2634 CB_DEST_SIZE_LONG;
2635 struct command_block *cb;
2636 u32 last_cb_element = 0;
2638 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2639 src_address, dest_address, length);
2641 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2642 return -1;
2644 last_cb_element = priv->sram_desc.last_cb_index;
2645 cb = &priv->sram_desc.cb_list[last_cb_element];
2646 priv->sram_desc.last_cb_index++;
2648 /* Calculate the new CB control word */
2649 if (interrupt_enabled)
2650 control |= CB_INT_ENABLED;
2652 if (is_last)
2653 control |= CB_LAST_VALID;
2655 control |= length;
2657 /* Calculate the CB Element's checksum value */
2658 cb->status = control ^ src_address ^ dest_address;
2660 /* Copy the Source and Destination addresses */
2661 cb->dest_addr = dest_address;
2662 cb->source_addr = src_address;
2664 /* Copy the Control Word last */
2665 cb->control = control;
2667 return 0;
2670 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2671 u32 src_phys, u32 dest_address, u32 length)
2673 u32 bytes_left = length;
2674 u32 src_offset = 0;
2675 u32 dest_offset = 0;
2676 int status = 0;
2677 IPW_DEBUG_FW(">> \n");
2678 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2679 src_phys, dest_address, length);
2680 while (bytes_left > CB_MAX_LENGTH) {
2681 status = ipw_fw_dma_add_command_block(priv,
2682 src_phys + src_offset,
2683 dest_address +
2684 dest_offset,
2685 CB_MAX_LENGTH, 0, 0);
2686 if (status) {
2687 IPW_DEBUG_FW_INFO(": Failed\n");
2688 return -1;
2689 } else
2690 IPW_DEBUG_FW_INFO(": Added new cb\n");
2692 src_offset += CB_MAX_LENGTH;
2693 dest_offset += CB_MAX_LENGTH;
2694 bytes_left -= CB_MAX_LENGTH;
2697 /* add the buffer tail */
2698 if (bytes_left > 0) {
2699 status =
2700 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2701 dest_address + dest_offset,
2702 bytes_left, 0, 0);
2703 if (status) {
2704 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2705 return -1;
2706 } else
2707 IPW_DEBUG_FW_INFO
2708 (": Adding new cb - the buffer tail\n");
2711 IPW_DEBUG_FW("<< \n");
2712 return 0;
2715 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2717 u32 current_index = 0, previous_index;
2718 u32 watchdog = 0;
2720 IPW_DEBUG_FW(">> : \n");
2722 current_index = ipw_fw_dma_command_block_index(priv);
2723 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2724 (int)priv->sram_desc.last_cb_index);
2726 while (current_index < priv->sram_desc.last_cb_index) {
2727 udelay(50);
2728 previous_index = current_index;
2729 current_index = ipw_fw_dma_command_block_index(priv);
2731 if (previous_index < current_index) {
2732 watchdog = 0;
2733 continue;
2735 if (++watchdog > 400) {
2736 IPW_DEBUG_FW_INFO("Timeout\n");
2737 ipw_fw_dma_dump_command_block(priv);
2738 ipw_fw_dma_abort(priv);
2739 return -1;
2743 ipw_fw_dma_abort(priv);
2745 /*Disable the DMA in the CSR register */
2746 ipw_set_bit(priv, IPW_RESET_REG,
2747 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2749 IPW_DEBUG_FW("<< dmaWaitSync \n");
2750 return 0;
2753 static void ipw_remove_current_network(struct ipw_priv *priv)
2755 struct list_head *element, *safe;
2756 struct ieee80211_network *network = NULL;
2757 unsigned long flags;
2759 spin_lock_irqsave(&priv->ieee->lock, flags);
2760 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2761 network = list_entry(element, struct ieee80211_network, list);
2762 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2763 list_del(element);
2764 list_add_tail(&network->list,
2765 &priv->ieee->network_free_list);
2768 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2772 * Check that card is still alive.
2773 * Reads debug register from domain0.
2774 * If card is present, pre-defined value should
2775 * be found there.
2777 * @param priv
2778 * @return 1 if card is present, 0 otherwise
2780 static inline int ipw_alive(struct ipw_priv *priv)
2782 return ipw_read32(priv, 0x90) == 0xd55555d5;
2785 /* timeout in msec, attempted in 10-msec quanta */
2786 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2787 int timeout)
2789 int i = 0;
2791 do {
2792 if ((ipw_read32(priv, addr) & mask) == mask)
2793 return i;
2794 mdelay(10);
2795 i += 10;
2796 } while (i < timeout);
2798 return -ETIME;
2801 /* These functions load the firmware and microcode needed for operation of
2802  * the ipw hardware. They assume the buffer holds the complete image and
2803  * that the caller handles the memory allocation and clean up.
2806 static int ipw_stop_master(struct ipw_priv *priv)
2808 int rc;
2810 IPW_DEBUG_TRACE(">> \n");
2811 /* stop master. typical delay - 0 */
2812 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2814 /* timeout is in msec, polled in 10-msec quanta */
2815 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2816 IPW_RESET_REG_MASTER_DISABLED, 100);
2817 if (rc < 0) {
2818 IPW_ERROR("wait for stop master failed after 100ms\n");
2819 return -1;
2822 IPW_DEBUG_INFO("stop master %dms\n", rc);
2824 return rc;
2827 static void ipw_arc_release(struct ipw_priv *priv)
2829 IPW_DEBUG_TRACE(">> \n");
2830 mdelay(5);
2832 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2834 /* no one knows timing, for safety add some delay */
2835 mdelay(5);
2838 struct fw_header {
2839 u32 version;
2840 u32 mode;
2843 struct fw_chunk {
2844 u32 address;
2845 u32 length;
2848 #define IPW_FW_MAJOR_VERSION 2
2849 #define IPW_FW_MINOR_VERSION 4
2851 #define IPW_FW_MINOR(x) ((x >> 8) & 0xff)
2852 #define IPW_FW_MAJOR(x) (x & 0xff)
2854 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION)
2856 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2857 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2859 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2860 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2861 #else
2862 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2863 #endif
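/* With the version numbers above, IPW_FW_NAME("boot") therefore expands to
 * "ipw-2.4-boot.fw".  Each image begins with a struct fw_header, which
 * ipw_get_fw() validates; ipw_load() then skips sizeof(struct fw_header)
 * before handing the payload to ipw_load_ucode()/ipw_load_firmware(). */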
2865 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2867 int rc = 0, i, addr;
2868 u8 cr = 0;
2869 u16 *image;
2871 image = (u16 *) data;
2873 IPW_DEBUG_TRACE(">> \n");
2875 rc = ipw_stop_master(priv);
2877 if (rc < 0)
2878 return rc;
2880 // spin_lock_irqsave(&priv->lock, flags);
2882 for (addr = IPW_SHARED_LOWER_BOUND;
2883 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2884 ipw_write32(priv, addr, 0);
2887 /* no ucode (yet) */
2888 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2889 /* destroy DMA queues */
2890 /* reset sequence */
2892 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2893 ipw_arc_release(priv);
2894 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2895 mdelay(1);
2897 /* reset PHY */
2898 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2899 mdelay(1);
2901 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2902 mdelay(1);
2904 /* enable ucode store */
2905 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2906 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2907 mdelay(1);
2909 /* write ucode */
2911 * @bug
2912 * Do NOT set indirect address register once and then
2913 * store data to indirect data register in the loop.
2914  * It seems very reasonable, but in this case DINO does not
2915  * accept the ucode. It is essential to set the address each time.
2917 /* load new ipw uCode */
2918 for (i = 0; i < len / 2; i++)
2919 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2920 cpu_to_le16(image[i]));
2922 /* enable DINO */
2923 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2924 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2926         /* this is where the igx / win driver deviates from the VAP driver. */
2928 /* wait for alive response */
2929 for (i = 0; i < 100; i++) {
2930 /* poll for incoming data */
2931 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2932 if (cr & DINO_RXFIFO_DATA)
2933 break;
2934 mdelay(1);
2937 if (cr & DINO_RXFIFO_DATA) {
2938                 /* alive_command_response size is NOT a multiple of 4 */
2939 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2941 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2942 response_buffer[i] =
2943 le32_to_cpu(ipw_read_reg32(priv,
2944 IPW_BASEBAND_RX_FIFO_READ));
2945 memcpy(&priv->dino_alive, response_buffer,
2946 sizeof(priv->dino_alive));
2947 if (priv->dino_alive.alive_command == 1
2948 && priv->dino_alive.ucode_valid == 1) {
2949 rc = 0;
2950 IPW_DEBUG_INFO
2951 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2952 "of %02d/%02d/%02d %02d:%02d\n",
2953 priv->dino_alive.software_revision,
2954 priv->dino_alive.software_revision,
2955 priv->dino_alive.device_identifier,
2956 priv->dino_alive.device_identifier,
2957 priv->dino_alive.time_stamp[0],
2958 priv->dino_alive.time_stamp[1],
2959 priv->dino_alive.time_stamp[2],
2960 priv->dino_alive.time_stamp[3],
2961 priv->dino_alive.time_stamp[4]);
2962 } else {
2963 IPW_DEBUG_INFO("Microcode is not alive\n");
2964 rc = -EINVAL;
2966 } else {
2967 IPW_DEBUG_INFO("No alive response from DINO\n");
2968 rc = -ETIME;
2971 /* disable DINO, otherwise for some reason
2972            the firmware has problems getting the alive response. */
2973 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2975 // spin_unlock_irqrestore(&priv->lock, flags);
2977 return rc;
2980 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2982 int rc = -1;
2983 int offset = 0;
2984 struct fw_chunk *chunk;
2985 dma_addr_t shared_phys;
2986 u8 *shared_virt;
2988         IPW_DEBUG_TRACE(">> : \n");
2989 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2991 if (!shared_virt)
2992 return -ENOMEM;
2994 memmove(shared_virt, data, len);
2996 /* Start the Dma */
2997 rc = ipw_fw_dma_enable(priv);
2999 if (priv->sram_desc.last_cb_index > 0) {
3000                 /* the DMA is already ready; this would be a bug. */
3001 BUG();
3002 goto out;
3005 do {
3006 chunk = (struct fw_chunk *)(data + offset);
3007 offset += sizeof(struct fw_chunk);
3008 /* build DMA packet and queue up for sending */
3009 /* dma to chunk->address, the chunk->length bytes from data +
3010                  * offset */
3011 /* Dma loading */
3012 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3013 le32_to_cpu(chunk->address),
3014 le32_to_cpu(chunk->length));
3015 if (rc) {
3016 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3017 goto out;
3020 offset += le32_to_cpu(chunk->length);
3021 } while (offset < len);
3023 /* Run the DMA and wait for the answer */
3024 rc = ipw_fw_dma_kick(priv);
3025 if (rc) {
3026 IPW_ERROR("dmaKick Failed\n");
3027 goto out;
3030 rc = ipw_fw_dma_wait(priv);
3031 if (rc) {
3032 IPW_ERROR("dmaWaitSync Failed\n");
3033 goto out;
3035 out:
3036 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3037 return rc;
3040 /* stop nic */
3041 static int ipw_stop_nic(struct ipw_priv *priv)
3043 int rc = 0;
3045 /* stop */
3046 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3048 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3049 IPW_RESET_REG_MASTER_DISABLED, 500);
3050 if (rc < 0) {
3051 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3052 return rc;
3055 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3057 return rc;
3060 static void ipw_start_nic(struct ipw_priv *priv)
3062 IPW_DEBUG_TRACE(">>\n");
3064 /* prvHwStartNic release ARC */
3065 ipw_clear_bit(priv, IPW_RESET_REG,
3066 IPW_RESET_REG_MASTER_DISABLED |
3067 IPW_RESET_REG_STOP_MASTER |
3068 CBD_RESET_REG_PRINCETON_RESET);
3070 /* enable power management */
3071 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3072 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3074 IPW_DEBUG_TRACE("<<\n");
3077 static int ipw_init_nic(struct ipw_priv *priv)
3079 int rc;
3081 IPW_DEBUG_TRACE(">>\n");
3082 /* reset */
3083 /*prvHwInitNic */
3084 /* set "initialization complete" bit to move adapter to D0 state */
3085 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3087 /* low-level PLL activation */
3088 ipw_write32(priv, IPW_READ_INT_REGISTER,
3089 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3091 /* wait for clock stabilization */
3092 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3093 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3094 if (rc < 0)
3095                 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3097 /* assert SW reset */
3098 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3100 udelay(10);
3102 /* set "initialization complete" bit to move adapter to D0 state */
3103 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3105         IPW_DEBUG_TRACE("<<\n");
3106 return 0;
3109 /* Call this function from process context, it will sleep in request_firmware.
3110 * Probe is an ok place to call this from.
3112 static int ipw_reset_nic(struct ipw_priv *priv)
3114 int rc = 0;
3115 unsigned long flags;
3117 IPW_DEBUG_TRACE(">>\n");
3119 rc = ipw_init_nic(priv);
3121 spin_lock_irqsave(&priv->lock, flags);
3122 /* Clear the 'host command active' bit... */
3123 priv->status &= ~STATUS_HCMD_ACTIVE;
3124 wake_up_interruptible(&priv->wait_command_queue);
3125 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3126 wake_up_interruptible(&priv->wait_state);
3127 spin_unlock_irqrestore(&priv->lock, flags);
3129 IPW_DEBUG_TRACE("<<\n");
3130 return rc;
3133 static int ipw_get_fw(struct ipw_priv *priv,
3134 const struct firmware **fw, const char *name)
3136 struct fw_header *header;
3137 int rc;
3139 /* ask firmware_class module to get the boot firmware off disk */
3140 rc = request_firmware(fw, name, &priv->pci_dev->dev);
3141 if (rc < 0) {
3142 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
3143 return rc;
3146 header = (struct fw_header *)(*fw)->data;
3147 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) {
3148 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
3149 name,
3150 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3151 IPW_FW_MAJOR_VERSION);
3152 return -EINVAL;
3155 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
3156 name,
3157 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3158 IPW_FW_MINOR(le32_to_cpu(header->version)),
3159 (*fw)->size - sizeof(struct fw_header));
3160 return 0;
3163 #define IPW_RX_BUF_SIZE (3000)
3165 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3166 struct ipw_rx_queue *rxq)
3168 unsigned long flags;
3169 int i;
3171 spin_lock_irqsave(&rxq->lock, flags);
3173 INIT_LIST_HEAD(&rxq->rx_free);
3174 INIT_LIST_HEAD(&rxq->rx_used);
3176 /* Fill the rx_used queue with _all_ of the Rx buffers */
3177 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3178 /* In the reset function, these buffers may have been allocated
3179 * to an SKB, so we need to unmap and free potential storage */
3180 if (rxq->pool[i].skb != NULL) {
3181 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3182 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3183 dev_kfree_skb(rxq->pool[i].skb);
3184 rxq->pool[i].skb = NULL;
3186 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3189 /* Set us so that we have processed and used all buffers, but have
3190 * not restocked the Rx queue with fresh buffers */
3191 rxq->read = rxq->write = 0;
3192 rxq->processed = RX_QUEUE_SIZE - 1;
3193 rxq->free_count = 0;
3194 spin_unlock_irqrestore(&rxq->lock, flags);
3197 #ifdef CONFIG_PM
3198 static int fw_loaded = 0;
3199 static const struct firmware *bootfw = NULL;
3200 static const struct firmware *firmware = NULL;
3201 static const struct firmware *ucode = NULL;
3203 static void free_firmware(void)
3205 if (fw_loaded) {
3206 release_firmware(bootfw);
3207 release_firmware(ucode);
3208 release_firmware(firmware);
3209 bootfw = ucode = firmware = NULL;
3210 fw_loaded = 0;
3213 #else
3214 #define free_firmware() do {} while (0)
3215 #endif
3217 static int ipw_load(struct ipw_priv *priv)
3219 #ifndef CONFIG_PM
3220 const struct firmware *bootfw = NULL;
3221 const struct firmware *firmware = NULL;
3222 const struct firmware *ucode = NULL;
3223 #endif
3224 char *ucode_name;
3225 char *fw_name;
3226 int rc = 0, retries = 3;
3228 switch (priv->ieee->iw_mode) {
3229 case IW_MODE_ADHOC:
3230 ucode_name = IPW_FW_NAME("ibss_ucode");
3231 fw_name = IPW_FW_NAME("ibss");
3232 break;
3233 #ifdef CONFIG_IPW2200_MONITOR
3234 case IW_MODE_MONITOR:
3235 ucode_name = IPW_FW_NAME("sniffer_ucode");
3236 fw_name = IPW_FW_NAME("sniffer");
3237 break;
3238 #endif
3239 case IW_MODE_INFRA:
3240 ucode_name = IPW_FW_NAME("bss_ucode");
3241 fw_name = IPW_FW_NAME("bss");
3242 break;
3243 default:
3244 rc = -EINVAL;
3247 if (rc < 0)
3248 goto error;
3250 if (!priv->rxq)
3251 priv->rxq = ipw_rx_queue_alloc(priv);
3252 else
3253 ipw_rx_queue_reset(priv, priv->rxq);
3254 if (!priv->rxq) {
3255 IPW_ERROR("Unable to initialize Rx queue\n");
3256 goto error;
3259 retry:
3260 /* Ensure interrupts are disabled */
3261 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3262 priv->status &= ~STATUS_INT_ENABLED;
3264 /* ack pending interrupts */
3265 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3267 ipw_stop_nic(priv);
3269 rc = ipw_reset_nic(priv);
3270 if (rc < 0) {
3271 IPW_ERROR("Unable to reset NIC\n");
3272 goto error;
3275 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3276 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3278 #ifdef CONFIG_PM
3279 if (!fw_loaded) {
3280 #endif
3281 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
3282 if (rc < 0)
3283 goto error;
3284 #ifdef CONFIG_PM
3286 #endif
3287 /* DMA the initial boot firmware into the device */
3288 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
3289 bootfw->size - sizeof(struct fw_header));
3290 if (rc < 0) {
3291 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3292 goto error;
3295 /* kick start the device */
3296 ipw_start_nic(priv);
3298 /* wait for the device to finish its initial startup sequence */
3299 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3300 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3301 if (rc < 0) {
3302 IPW_ERROR("device failed to boot initial fw image\n");
3303 goto error;
3305 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3307 /* ack fw init done interrupt */
3308 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3310 #ifdef CONFIG_PM
3311 if (!fw_loaded) {
3312 #endif
3313 rc = ipw_get_fw(priv, &ucode, ucode_name);
3314 if (rc < 0)
3315 goto error;
3316 #ifdef CONFIG_PM
3318 #endif
3320 /* DMA the ucode into the device */
3321 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
3322 ucode->size - sizeof(struct fw_header));
3323 if (rc < 0) {
3324 IPW_ERROR("Unable to load ucode: %d\n", rc);
3325 goto error;
3328 /* stop nic */
3329 ipw_stop_nic(priv);
3331 #ifdef CONFIG_PM
3332 if (!fw_loaded) {
3333 #endif
3334 rc = ipw_get_fw(priv, &firmware, fw_name);
3335 if (rc < 0)
3336 goto error;
3337 #ifdef CONFIG_PM
3339 #endif
3341 /* DMA bss firmware into the device */
3342 rc = ipw_load_firmware(priv, firmware->data +
3343 sizeof(struct fw_header),
3344 firmware->size - sizeof(struct fw_header));
3345 if (rc < 0) {
3346 IPW_ERROR("Unable to load firmware: %d\n", rc);
3347 goto error;
3349 #ifdef CONFIG_PM
3350 fw_loaded = 1;
3351 #endif
3353 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3355 rc = ipw_queue_reset(priv);
3356 if (rc < 0) {
3357 IPW_ERROR("Unable to initialize queues\n");
3358 goto error;
3361 /* Ensure interrupts are disabled */
3362 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3363 /* ack pending interrupts */
3364 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3366 /* kick start the device */
3367 ipw_start_nic(priv);
3369 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3370 if (retries > 0) {
3371 IPW_WARNING("Parity error. Retrying init.\n");
3372 retries--;
3373 goto retry;
3376 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3377 rc = -EIO;
3378 goto error;
3381 /* wait for the device */
3382 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3383 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3384 if (rc < 0) {
3385 IPW_ERROR("device failed to start within 500ms\n");
3386 goto error;
3388 IPW_DEBUG_INFO("device response after %dms\n", rc);
3390 /* ack fw init done interrupt */
3391 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3393 /* read eeprom data and initialize the eeprom region of sram */
3394 priv->eeprom_delay = 1;
3395 ipw_eeprom_init_sram(priv);
3397 /* enable interrupts */
3398 ipw_enable_interrupts(priv);
3400 /* Ensure our queue has valid packets */
3401 ipw_rx_queue_replenish(priv);
3403 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3405 /* ack pending interrupts */
3406 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3408 #ifndef CONFIG_PM
3409 release_firmware(bootfw);
3410 release_firmware(ucode);
3411 release_firmware(firmware);
3412 #endif
3413 return 0;
3415 error:
3416 if (priv->rxq) {
3417 ipw_rx_queue_free(priv, priv->rxq);
3418 priv->rxq = NULL;
3420 ipw_tx_queue_free(priv);
3421 if (bootfw)
3422 release_firmware(bootfw);
3423 if (ucode)
3424 release_firmware(ucode);
3425 if (firmware)
3426 release_firmware(firmware);
3427 #ifdef CONFIG_PM
3428 fw_loaded = 0;
3429 bootfw = ucode = firmware = NULL;
3430 #endif
3432 return rc;
3436 * DMA services
3438 * Theory of operation
3440  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3441  * Two empty entries are always kept in the buffer to protect from overflow.
3443  * For the Tx queue, there are low mark and high mark limits. If, after queuing
3444  * the packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
3445  * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
3446  * the Tx queue is resumed.
3448 * The IPW operates with six queues, one receive queue in the device's
3449 * sram, one transmit queue for sending commands to the device firmware,
3450 * and four transmit queues for data.
3452 * The four transmit queues allow for performing quality of service (qos)
3453 * transmissions as per the 802.11 protocol. Currently Linux does not
3454 * provide a mechanism to the user for utilizing prioritized queues, so
3455 * we only utilize the first data transmit queue (queue1).
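 *
 * A small worked example of the space accounting done by ipw_queue_space()
 * below: with n_bd = 16, last_used = 10 and first_empty = 3 the raw
 * difference is 7; subtracting the 2 reserved entries leaves 5 usable slots.
 * If first_empty has wrapped past last_used, the difference goes non-positive
 * and n_bd is added back before the reserve is subtracted.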
3459 * Driver allocates buffers of this size for Rx
3462 static inline int ipw_queue_space(const struct clx2_queue *q)
3464 int s = q->last_used - q->first_empty;
3465 if (s <= 0)
3466 s += q->n_bd;
3467 s -= 2; /* keep some reserve to not confuse empty and full situations */
3468 if (s < 0)
3469 s = 0;
3470 return s;
3473 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3475 return (++index == n_bd) ? 0 : index;
3479 * Initialize common DMA queue structure
3481 * @param q queue to init
3482 * @param count Number of BD's to allocate. Should be power of 2
3483 * @param read_register Address for 'read' register
3484 * (not offset within BAR, full address)
3485 * @param write_register Address for 'write' register
3486 * (not offset within BAR, full address)
3487 * @param base_register Address for 'base' register
3488 * (not offset within BAR, full address)
3489 * @param size Address for 'size' register
3490 * (not offset within BAR, full address)
3492 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3493 int count, u32 read, u32 write, u32 base, u32 size)
3495 q->n_bd = count;
3497 q->low_mark = q->n_bd / 4;
3498 if (q->low_mark < 4)
3499 q->low_mark = 4;
3501 q->high_mark = q->n_bd / 8;
3502 if (q->high_mark < 2)
3503 q->high_mark = 2;
3505 q->first_empty = q->last_used = 0;
3506 q->reg_r = read;
3507 q->reg_w = write;
3509 ipw_write32(priv, base, q->dma_addr);
3510 ipw_write32(priv, size, count);
3511 ipw_write32(priv, read, 0);
3512 ipw_write32(priv, write, 0);
3514 _ipw_read32(priv, 0x90);
3517 static int ipw_queue_tx_init(struct ipw_priv *priv,
3518 struct clx2_tx_queue *q,
3519 int count, u32 read, u32 write, u32 base, u32 size)
3521 struct pci_dev *dev = priv->pci_dev;
3523 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3524 if (!q->txb) {
3525                 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3526 return -ENOMEM;
3529 q->bd =
3530 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3531 if (!q->bd) {
3532 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3533 sizeof(q->bd[0]) * count);
3534 kfree(q->txb);
3535 q->txb = NULL;
3536 return -ENOMEM;
3539 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3540 return 0;
3544  * Free one TFD, the one at index [txq->q.last_used].
3545 * Do NOT advance any indexes
3547 * @param dev
3548 * @param txq
3550 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3551 struct clx2_tx_queue *txq)
3553 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3554 struct pci_dev *dev = priv->pci_dev;
3555 int i;
3557 /* classify bd */
3558 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3559 /* nothing to cleanup after for host commands */
3560 return;
3562 /* sanity check */
3563 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3564 IPW_ERROR("Too many chunks: %i\n",
3565 le32_to_cpu(bd->u.data.num_chunks));
3566                 /** @todo issue fatal error, it is quite a serious situation */
3567 return;
3570 /* unmap chunks if any */
3571 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3572 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3573 le16_to_cpu(bd->u.data.chunk_len[i]),
3574 PCI_DMA_TODEVICE);
3575 if (txq->txb[txq->q.last_used]) {
3576 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3577 txq->txb[txq->q.last_used] = NULL;
3583 * Deallocate DMA queue.
3585 * Empty queue by removing and destroying all BD's.
3586 * Free all buffers.
3588 * @param dev
3589 * @param q
3591 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3593 struct clx2_queue *q = &txq->q;
3594 struct pci_dev *dev = priv->pci_dev;
3596 if (q->n_bd == 0)
3597 return;
3599 /* first, empty all BD's */
3600 for (; q->first_empty != q->last_used;
3601 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3602 ipw_queue_tx_free_tfd(priv, txq);
3605 /* free buffers belonging to queue itself */
3606 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3607 q->dma_addr);
3608 kfree(txq->txb);
3610 /* 0 fill whole structure */
3611 memset(txq, 0, sizeof(*txq));
3615 * Destroy all DMA queues and structures
3617 * @param priv
3619 static void ipw_tx_queue_free(struct ipw_priv *priv)
3621 /* Tx CMD queue */
3622 ipw_queue_tx_free(priv, &priv->txq_cmd);
3624 /* Tx queues */
3625 ipw_queue_tx_free(priv, &priv->txq[0]);
3626 ipw_queue_tx_free(priv, &priv->txq[1]);
3627 ipw_queue_tx_free(priv, &priv->txq[2]);
3628 ipw_queue_tx_free(priv, &priv->txq[3]);
3631 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3633 /* First 3 bytes are manufacturer */
3634 bssid[0] = priv->mac_addr[0];
3635 bssid[1] = priv->mac_addr[1];
3636 bssid[2] = priv->mac_addr[2];
3638 /* Last bytes are random */
3639 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3641 bssid[0] &= 0xfe; /* clear multicast bit */
3642 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3645 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3647 struct ipw_station_entry entry;
3648 int i;
3650 for (i = 0; i < priv->num_stations; i++) {
3651 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3652 /* Another node is active in network */
3653 priv->missed_adhoc_beacons = 0;
3654 if (!(priv->config & CFG_STATIC_CHANNEL))
3655 /* when other nodes drop out, we drop out */
3656 priv->config &= ~CFG_ADHOC_PERSIST;
3658 return i;
3662 if (i == MAX_STATIONS)
3663 return IPW_INVALID_STATION;
3665 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3667 entry.reserved = 0;
3668 entry.support_mode = 0;
3669 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3670 memcpy(priv->stations[i], bssid, ETH_ALEN);
3671 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3672 &entry, sizeof(entry));
3673 priv->num_stations++;
3675 return i;
3678 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3680 int i;
3682 for (i = 0; i < priv->num_stations; i++)
3683 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3684 return i;
3686 return IPW_INVALID_STATION;
3689 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3691 int err;
3693 if (priv->status & STATUS_ASSOCIATING) {
3694 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3695 queue_work(priv->workqueue, &priv->disassociate);
3696 return;
3699 if (!(priv->status & STATUS_ASSOCIATED)) {
3700 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3701 return;
3704         IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3705 "on channel %d.\n",
3706 MAC_ARG(priv->assoc_request.bssid),
3707 priv->assoc_request.channel);
3709 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3710 priv->status |= STATUS_DISASSOCIATING;
3712 if (quiet)
3713 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3714 else
3715 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3717 err = ipw_send_associate(priv, &priv->assoc_request);
3718 if (err) {
3719 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3720 "failed.\n");
3721 return;
3726 static int ipw_disassociate(void *data)
3728 struct ipw_priv *priv = data;
3729 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3730 return 0;
3731 ipw_send_disassociate(data, 0);
3732 return 1;
3735 static void ipw_bg_disassociate(void *data)
3737 struct ipw_priv *priv = data;
3738 mutex_lock(&priv->mutex);
3739 ipw_disassociate(data);
3740 mutex_unlock(&priv->mutex);
3743 static void ipw_system_config(void *data)
3745 struct ipw_priv *priv = data;
3746 ipw_send_system_config(priv, &priv->sys_config);
3749 struct ipw_status_code {
3750 u16 status;
3751 const char *reason;
3754 static const struct ipw_status_code ipw_status_codes[] = {
3755 {0x00, "Successful"},
3756 {0x01, "Unspecified failure"},
3757 {0x0A, "Cannot support all requested capabilities in the "
3758 "Capability information field"},
3759 {0x0B, "Reassociation denied due to inability to confirm that "
3760 "association exists"},
3761 {0x0C, "Association denied due to reason outside the scope of this "
3762 "standard"},
3763 {0x0D,
3764 "Responding station does not support the specified authentication "
3765 "algorithm"},
3766 {0x0E,
3767 "Received an Authentication frame with authentication sequence "
3768 "transaction sequence number out of expected sequence"},
3769 {0x0F, "Authentication rejected because of challenge failure"},
3770 {0x10, "Authentication rejected due to timeout waiting for next "
3771 "frame in sequence"},
3772 {0x11, "Association denied because AP is unable to handle additional "
3773 "associated stations"},
3774 {0x12,
3775 "Association denied due to requesting station not supporting all "
3776 "of the datarates in the BSSBasicServiceSet Parameter"},
3777 {0x13,
3778 "Association denied due to requesting station not supporting "
3779 "short preamble operation"},
3780 {0x14,
3781 "Association denied due to requesting station not supporting "
3782 "PBCC encoding"},
3783 {0x15,
3784 "Association denied due to requesting station not supporting "
3785 "channel agility"},
3786 {0x19,
3787 "Association denied due to requesting station not supporting "
3788 "short slot operation"},
3789 {0x1A,
3790 "Association denied due to requesting station not supporting "
3791 "DSSS-OFDM operation"},
3792 {0x28, "Invalid Information Element"},
3793 {0x29, "Group Cipher is not valid"},
3794 {0x2A, "Pairwise Cipher is not valid"},
3795 {0x2B, "AKMP is not valid"},
3796 {0x2C, "Unsupported RSN IE version"},
3797 {0x2D, "Invalid RSN IE Capabilities"},
3798 {0x2E, "Cipher suite is rejected per security policy"},
3801 #ifdef CONFIG_IPW2200_DEBUG
3802 static const char *ipw_get_status_code(u16 status)
3804 int i;
3805 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3806 if (ipw_status_codes[i].status == (status & 0xff))
3807 return ipw_status_codes[i].reason;
3808 return "Unknown status value.";
3810 #endif
3812 static inline void average_init(struct average *avg)
3814 memset(avg, 0, sizeof(*avg));
3817 static void average_add(struct average *avg, s16 val)
3819 avg->sum -= avg->entries[avg->pos];
3820 avg->sum += val;
3821 avg->entries[avg->pos++] = val;
3822 if (unlikely(avg->pos == AVG_ENTRIES)) {
3823 avg->init = 1;
3824 avg->pos = 0;
3828 static s16 average_value(struct average *avg)
3830 if (!unlikely(avg->init)) {
3831 if (avg->pos)
3832 return avg->sum / avg->pos;
3833 return 0;
3836 return avg->sum / AVG_ENTRIES;
3839 static void ipw_reset_stats(struct ipw_priv *priv)
3841 u32 len = sizeof(u32);
3843 priv->quality = 0;
3845 average_init(&priv->average_missed_beacons);
3846 average_init(&priv->average_rssi);
3847 average_init(&priv->average_noise);
3849 priv->last_rate = 0;
3850 priv->last_missed_beacons = 0;
3851 priv->last_rx_packets = 0;
3852 priv->last_tx_packets = 0;
3853 priv->last_tx_failures = 0;
3855 /* Firmware managed, reset only when NIC is restarted, so we have to
3856 * normalize on the current value */
3857 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3858 &priv->last_rx_err, &len);
3859 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3860 &priv->last_tx_failures, &len);
3862 /* Driver managed, reset with each association */
3863 priv->missed_adhoc_beacons = 0;
3864 priv->missed_beacons = 0;
3865 priv->tx_packets = 0;
3866 priv->rx_packets = 0;
3870 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3872 u32 i = 0x80000000;
3873 u32 mask = priv->rates_mask;
3874 /* If currently associated in B mode, restrict the maximum
3875 * rate match to B rates */
3876 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3877 mask &= IEEE80211_CCK_RATES_MASK;
3879 /* TODO: Verify that the rate is supported by the current rates
3880 * list. */
3882 while (i && !(mask & i))
3883 i >>= 1;
3884 switch (i) {
3885 case IEEE80211_CCK_RATE_1MB_MASK:
3886 return 1000000;
3887 case IEEE80211_CCK_RATE_2MB_MASK:
3888 return 2000000;
3889 case IEEE80211_CCK_RATE_5MB_MASK:
3890 return 5500000;
3891 case IEEE80211_OFDM_RATE_6MB_MASK:
3892 return 6000000;
3893 case IEEE80211_OFDM_RATE_9MB_MASK:
3894 return 9000000;
3895 case IEEE80211_CCK_RATE_11MB_MASK:
3896 return 11000000;
3897 case IEEE80211_OFDM_RATE_12MB_MASK:
3898 return 12000000;
3899 case IEEE80211_OFDM_RATE_18MB_MASK:
3900 return 18000000;
3901 case IEEE80211_OFDM_RATE_24MB_MASK:
3902 return 24000000;
3903 case IEEE80211_OFDM_RATE_36MB_MASK:
3904 return 36000000;
3905 case IEEE80211_OFDM_RATE_48MB_MASK:
3906 return 48000000;
3907 case IEEE80211_OFDM_RATE_54MB_MASK:
3908 return 54000000;
3911 if (priv->ieee->mode == IEEE_B)
3912 return 11000000;
3913 else
3914 return 54000000;
3917 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3919 u32 rate, len = sizeof(rate);
3920 int err;
3922 if (!(priv->status & STATUS_ASSOCIATED))
3923 return 0;
3925 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3926 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3927 &len);
3928 if (err) {
3929 IPW_DEBUG_INFO("failed querying ordinals.\n");
3930 return 0;
3932 } else
3933 return ipw_get_max_rate(priv);
3935 switch (rate) {
3936 case IPW_TX_RATE_1MB:
3937 return 1000000;
3938 case IPW_TX_RATE_2MB:
3939 return 2000000;
3940 case IPW_TX_RATE_5MB:
3941 return 5500000;
3942 case IPW_TX_RATE_6MB:
3943 return 6000000;
3944 case IPW_TX_RATE_9MB:
3945 return 9000000;
3946 case IPW_TX_RATE_11MB:
3947 return 11000000;
3948 case IPW_TX_RATE_12MB:
3949 return 12000000;
3950 case IPW_TX_RATE_18MB:
3951 return 18000000;
3952 case IPW_TX_RATE_24MB:
3953 return 24000000;
3954 case IPW_TX_RATE_36MB:
3955 return 36000000;
3956 case IPW_TX_RATE_48MB:
3957 return 48000000;
3958 case IPW_TX_RATE_54MB:
3959 return 54000000;
3962 return 0;
3965 #define IPW_STATS_INTERVAL (2 * HZ)
3966 static void ipw_gather_stats(struct ipw_priv *priv)
3968 u32 rx_err, rx_err_delta, rx_packets_delta;
3969 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3970 u32 missed_beacons_percent, missed_beacons_delta;
3971 u32 quality = 0;
3972 u32 len = sizeof(u32);
3973 s16 rssi;
3974 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3975 rate_quality;
3976 u32 max_rate;
3978 if (!(priv->status & STATUS_ASSOCIATED)) {
3979 priv->quality = 0;
3980 return;
3983 /* Update the statistics */
3984 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3985 &priv->missed_beacons, &len);
3986 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3987 priv->last_missed_beacons = priv->missed_beacons;
3988 if (priv->assoc_request.beacon_interval) {
3989 missed_beacons_percent = missed_beacons_delta *
3990 (HZ * priv->assoc_request.beacon_interval) /
3991 (IPW_STATS_INTERVAL * 10);
3992 } else {
3993 missed_beacons_percent = 0;
3995 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3997 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3998 rx_err_delta = rx_err - priv->last_rx_err;
3999 priv->last_rx_err = rx_err;
4001 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4002 tx_failures_delta = tx_failures - priv->last_tx_failures;
4003 priv->last_tx_failures = tx_failures;
4005 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4006 priv->last_rx_packets = priv->rx_packets;
4008 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4009 priv->last_tx_packets = priv->tx_packets;
4011 /* Calculate quality based on the following:
4013 * Missed beacon: 100% = 0, 0% = 70% missed
4014 * Rate: 60% = 1Mbs, 100% = Max
4015 * Rx and Tx errors represent a straight % of total Rx/Tx
4016 * RSSI: 100% = > -50, 0% = < -80
4017 * Rx errors: 100% = 0, 0% = 50% missed
4019 * The lowest computed quality is used.
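 *
 * For example, with the mappings implemented below: 5% missed beacons gives
 * beacon_quality = (95 - 5) * 100 / (100 - 5) ~= 94%; a current rate equal to
 * the maximum gives rate_quality = 100%, while 1Mbs against a 54Mbs maximum
 * works out to 60% with the integer arithmetic used.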
4022 #define BEACON_THRESHOLD 5
4023 beacon_quality = 100 - missed_beacons_percent;
4024 if (beacon_quality < BEACON_THRESHOLD)
4025 beacon_quality = 0;
4026 else
4027 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4028 (100 - BEACON_THRESHOLD);
4029 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4030 beacon_quality, missed_beacons_percent);
4032 priv->last_rate = ipw_get_current_rate(priv);
4033 max_rate = ipw_get_max_rate(priv);
4034 rate_quality = priv->last_rate * 40 / max_rate + 60;
4035 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4036 rate_quality, priv->last_rate / 1000000);
4038 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4039 rx_quality = 100 - (rx_err_delta * 100) /
4040 (rx_packets_delta + rx_err_delta);
4041 else
4042 rx_quality = 100;
4043 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4044 rx_quality, rx_err_delta, rx_packets_delta);
4046 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4047 tx_quality = 100 - (tx_failures_delta * 100) /
4048 (tx_packets_delta + tx_failures_delta);
4049 else
4050 tx_quality = 100;
4051 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4052 tx_quality, tx_failures_delta, tx_packets_delta);
4054 rssi = average_value(&priv->average_rssi);
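/* The expression below maps the averaged RSSI onto a 0-100% scale between
 * worst_rssi and perfect_rssi, using a quadratic curve rather than a straight
 * linear interpolation.  Illustrative numbers only: assuming perfect_rssi =
 * -20 dBm and worst_rssi = -85 dBm, an RSSI of -60 dBm gives
 * (100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65), i.e. roughly 67%,
 * before the clamping below. */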
4055 signal_quality =
4056 (100 *
4057 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4058 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4059 (priv->ieee->perfect_rssi - rssi) *
4060 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4061 62 * (priv->ieee->perfect_rssi - rssi))) /
4062 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4063 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4064 if (signal_quality > 100)
4065 signal_quality = 100;
4066 else if (signal_quality < 1)
4067 signal_quality = 0;
4069 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4070 signal_quality, rssi);
4072 quality = min(beacon_quality,
4073 min(rate_quality,
4074 min(tx_quality, min(rx_quality, signal_quality))));
4075 if (quality == beacon_quality)
4076 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4077 quality);
4078 if (quality == rate_quality)
4079 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4080 quality);
4081 if (quality == tx_quality)
4082 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4083 quality);
4084 if (quality == rx_quality)
4085 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4086 quality);
4087 if (quality == signal_quality)
4088 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4089 quality);
4091 priv->quality = quality;
4093 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4094 IPW_STATS_INTERVAL);
4097 static void ipw_bg_gather_stats(void *data)
4099 struct ipw_priv *priv = data;
4100 mutex_lock(&priv->mutex);
4101 ipw_gather_stats(data);
4102 mutex_unlock(&priv->mutex);
4105 /* Missed beacon behavior:
4106 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4107 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4108 * Above disassociate_threshold, give up and stop scanning.
4109 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
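/* Illustrative example (threshold values are configurable; these are shown
 * only for concreteness): with roaming_threshold = 8 and
 * disassociate_threshold = 24, missed-beacon counts 1-8 are merely logged,
 * counts 9-24 set STATUS_ROAMING and kick off a scan for a better AP, and
 * anything above 24 disassociates and aborts any scan in progress. */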
4110 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4111 int missed_count)
4113 priv->notif_missed_beacons = missed_count;
4115 if (missed_count > priv->disassociate_threshold &&
4116 priv->status & STATUS_ASSOCIATED) {
4117 /* If associated and we've hit the missed
4118 * beacon threshold, disassociate, turn
4119 * off roaming, and abort any active scans */
4120 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4121 IPW_DL_STATE | IPW_DL_ASSOC,
4122 "Missed beacon: %d - disassociate\n", missed_count);
4123 priv->status &= ~STATUS_ROAMING;
4124 if (priv->status & STATUS_SCANNING) {
4125 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4126 IPW_DL_STATE,
4127 "Aborting scan with missed beacon.\n");
4128 queue_work(priv->workqueue, &priv->abort_scan);
4131 queue_work(priv->workqueue, &priv->disassociate);
4132 return;
4135 if (priv->status & STATUS_ROAMING) {
4136 /* If we are currently roaming, then just
4137 * print a debug statement... */
4138 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4139 "Missed beacon: %d - roam in progress\n",
4140 missed_count);
4141 return;
4144 if (roaming &&
4145 (missed_count > priv->roaming_threshold &&
4146 missed_count <= priv->disassociate_threshold)) {
4147 /* If we are not already roaming, set the ROAM
4148 * bit in the status and kick off a scan.
4149 * This can happen several times before we reach
4150 * disassociate_threshold. */
4151 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4152 "Missed beacon: %d - initiate "
4153 "roaming\n", missed_count);
4154 if (!(priv->status & STATUS_ROAMING)) {
4155 priv->status |= STATUS_ROAMING;
4156 if (!(priv->status & STATUS_SCANNING))
4157 queue_work(priv->workqueue,
4158 &priv->request_scan);
4160 return;
4163 if (priv->status & STATUS_SCANNING) {
4164 /* Stop scan to keep fw from getting
4165 * stuck (only if we aren't roaming --
4166 * otherwise we'll never scan more than 2 or 3
4167 * channels..) */
4168 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4169 "Aborting scan with missed beacon.\n");
4170 queue_work(priv->workqueue, &priv->abort_scan);
4173 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4177 * Handle host notification packet.
4178 * Called from interrupt routine
4180 static void ipw_rx_notification(struct ipw_priv *priv,
4181 struct ipw_rx_notification *notif)
4183 notif->size = le16_to_cpu(notif->size);
4185 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4187 switch (notif->subtype) {
4188 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4189 struct notif_association *assoc = &notif->u.assoc;
4191 switch (assoc->state) {
4192 case CMAS_ASSOCIATED:{
4193 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4194 IPW_DL_ASSOC,
4195 "associated: '%s' " MAC_FMT
4196 " \n",
4197 escape_essid(priv->essid,
4198 priv->essid_len),
4199 MAC_ARG(priv->bssid));
4201 switch (priv->ieee->iw_mode) {
4202 case IW_MODE_INFRA:
4203 memcpy(priv->ieee->bssid,
4204 priv->bssid, ETH_ALEN);
4205 break;
4207 case IW_MODE_ADHOC:
4208 memcpy(priv->ieee->bssid,
4209 priv->bssid, ETH_ALEN);
4211 /* clear out the station table */
4212 priv->num_stations = 0;
4214 IPW_DEBUG_ASSOC
4215 ("queueing adhoc check\n");
4216 queue_delayed_work(priv->
4217 workqueue,
4218 &priv->
4219 adhoc_check,
4220 priv->
4221 assoc_request.
4222 beacon_interval);
4223 break;
4226 priv->status &= ~STATUS_ASSOCIATING;
4227 priv->status |= STATUS_ASSOCIATED;
4228 queue_work(priv->workqueue,
4229 &priv->system_config);
4231 #ifdef CONFIG_IPW_QOS
4232 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4233 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4234 if ((priv->status & STATUS_AUTH) &&
4235 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4236 == IEEE80211_STYPE_ASSOC_RESP)) {
4237 if ((sizeof
4238 (struct
4239 ieee80211_assoc_response)
4240 <= notif->size)
4241 && (notif->size <= 2314)) {
4242 struct
4243 ieee80211_rx_stats
4244 stats = {
4245 .len =
4246 notif->
4247 size - 1,
4250 IPW_DEBUG_QOS
4251 ("QoS Associate "
4252 "size %d\n",
4253 notif->size);
4254 ieee80211_rx_mgt(priv->
4255 ieee,
4256 (struct
4257 ieee80211_hdr_4addr
4259 &notif->u.raw, &stats);
4262 #endif
4264 schedule_work(&priv->link_up);
4266 break;
4269 case CMAS_AUTHENTICATED:{
4270 if (priv->
4271 status & (STATUS_ASSOCIATED |
4272 STATUS_AUTH)) {
4273 #ifdef CONFIG_IPW2200_DEBUG
4274 struct notif_authenticate *auth
4275 = &notif->u.auth;
4276 IPW_DEBUG(IPW_DL_NOTIF |
4277 IPW_DL_STATE |
4278 IPW_DL_ASSOC,
4279 "deauthenticated: '%s' "
4280 MAC_FMT
4281 ": (0x%04X) - %s \n",
4282 escape_essid(priv->
4283 essid,
4284 priv->
4285 essid_len),
4286 MAC_ARG(priv->bssid),
4287 ntohs(auth->status),
4288 ipw_get_status_code
4289 (ntohs
4290 (auth->status)));
4291 #endif
4293 priv->status &=
4294 ~(STATUS_ASSOCIATING |
4295 STATUS_AUTH |
4296 STATUS_ASSOCIATED);
4298 schedule_work(&priv->link_down);
4299 break;
4302 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4303 IPW_DL_ASSOC,
4304 "authenticated: '%s' " MAC_FMT
4305 "\n",
4306 escape_essid(priv->essid,
4307 priv->essid_len),
4308 MAC_ARG(priv->bssid));
4309 break;
4312 case CMAS_INIT:{
4313 if (priv->status & STATUS_AUTH) {
4314 struct
4315 ieee80211_assoc_response
4316 *resp;
4317 resp =
4318 (struct
4319 ieee80211_assoc_response
4320 *)&notif->u.raw;
4321 IPW_DEBUG(IPW_DL_NOTIF |
4322 IPW_DL_STATE |
4323 IPW_DL_ASSOC,
4324 "association failed (0x%04X): %s\n",
4325 ntohs(resp->status),
4326 ipw_get_status_code
4327 (ntohs
4328 (resp->status)));
4331 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4332 IPW_DL_ASSOC,
4333 "disassociated: '%s' " MAC_FMT
4334 " \n",
4335 escape_essid(priv->essid,
4336 priv->essid_len),
4337 MAC_ARG(priv->bssid));
4339 priv->status &=
4340 ~(STATUS_DISASSOCIATING |
4341 STATUS_ASSOCIATING |
4342 STATUS_ASSOCIATED | STATUS_AUTH);
4343 if (priv->assoc_network
4344 && (priv->assoc_network->
4345 capability &
4346 WLAN_CAPABILITY_IBSS))
4347 ipw_remove_current_network
4348 (priv);
4350 schedule_work(&priv->link_down);
4352 break;
4355 case CMAS_RX_ASSOC_RESP:
4356 break;
4358 default:
4359 IPW_ERROR("assoc: unknown (%d)\n",
4360 assoc->state);
4361 break;
4364 break;
4367 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4368 struct notif_authenticate *auth = &notif->u.auth;
4369 switch (auth->state) {
4370 case CMAS_AUTHENTICATED:
4371 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4372 "authenticated: '%s' " MAC_FMT " \n",
4373 escape_essid(priv->essid,
4374 priv->essid_len),
4375 MAC_ARG(priv->bssid));
4376 priv->status |= STATUS_AUTH;
4377 break;
4379 case CMAS_INIT:
4380 if (priv->status & STATUS_AUTH) {
4381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4382 IPW_DL_ASSOC,
4383 "authentication failed (0x%04X): %s\n",
4384 ntohs(auth->status),
4385 ipw_get_status_code(ntohs
4386 (auth->
4387 status)));
4389 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4390 IPW_DL_ASSOC,
4391 "deauthenticated: '%s' " MAC_FMT "\n",
4392 escape_essid(priv->essid,
4393 priv->essid_len),
4394 MAC_ARG(priv->bssid));
4396 priv->status &= ~(STATUS_ASSOCIATING |
4397 STATUS_AUTH |
4398 STATUS_ASSOCIATED);
4400 schedule_work(&priv->link_down);
4401 break;
4403 case CMAS_TX_AUTH_SEQ_1:
4404 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4405 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4406 break;
4407 case CMAS_RX_AUTH_SEQ_2:
4408 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4409 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4410 break;
4411 case CMAS_AUTH_SEQ_1_PASS:
4412 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4414 break;
4415 case CMAS_AUTH_SEQ_1_FAIL:
4416 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4417 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4418 break;
4419 case CMAS_TX_AUTH_SEQ_3:
4420 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4421 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4422 break;
4423 case CMAS_RX_AUTH_SEQ_4:
4424 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4425 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4426 break;
4427 case CMAS_AUTH_SEQ_2_PASS:
4428 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4429 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4430 break;
4431 case CMAS_AUTH_SEQ_2_FAIL:
4432 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4433 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4434 break;
4435 case CMAS_TX_ASSOC:
4436 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4437 IPW_DL_ASSOC, "TX_ASSOC\n");
4438 break;
4439 case CMAS_RX_ASSOC_RESP:
4440 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4441 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4443 break;
4444 case CMAS_ASSOCIATED:
4445 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4446 IPW_DL_ASSOC, "ASSOCIATED\n");
4447 break;
4448 default:
4449 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4450 auth->state);
4451 break;
4453 break;
4456 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4457 struct notif_channel_result *x =
4458 &notif->u.channel_result;
4460 if (notif->size == sizeof(*x)) {
4461 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4462 x->channel_num);
4463 } else {
4464 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4465 "(should be %zd)\n",
4466 notif->size, sizeof(*x));
4468 break;
4471 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4472 struct notif_scan_complete *x = &notif->u.scan_complete;
4473 if (notif->size == sizeof(*x)) {
4474 IPW_DEBUG_SCAN
4475 ("Scan completed: type %d, %d channels, "
4476 "%d status\n", x->scan_type,
4477 x->num_channels, x->status);
4478 } else {
4479 IPW_ERROR("Scan completed of wrong size %d "
4480 "(should be %zd)\n",
4481 notif->size, sizeof(*x));
4484 priv->status &=
4485 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4487 wake_up_interruptible(&priv->wait_state);
4488 cancel_delayed_work(&priv->scan_check);
4490 if (priv->status & STATUS_EXIT_PENDING)
4491 break;
4493 priv->ieee->scans++;
4495 #ifdef CONFIG_IPW2200_MONITOR
4496 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4497 priv->status |= STATUS_SCAN_FORCED;
4498 queue_work(priv->workqueue,
4499 &priv->request_scan);
4500 break;
4502 priv->status &= ~STATUS_SCAN_FORCED;
4503 #endif /* CONFIG_IPW2200_MONITOR */
4505 if (!(priv->status & (STATUS_ASSOCIATED |
4506 STATUS_ASSOCIATING |
4507 STATUS_ROAMING |
4508 STATUS_DISASSOCIATING)))
4509 queue_work(priv->workqueue, &priv->associate);
4510 else if (priv->status & STATUS_ROAMING) {
4511 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4512 /* If a scan completed and we are in roam mode, then
4513 * the scan that completed was the one requested as a
4514 * result of entering roam... so, schedule the
4515 * roam work */
4516 queue_work(priv->workqueue,
4517 &priv->roam);
4518 else
4519 /* Don't schedule if we aborted the scan */
4520 priv->status &= ~STATUS_ROAMING;
4521 } else if (priv->status & STATUS_SCAN_PENDING)
4522 queue_work(priv->workqueue,
4523 &priv->request_scan);
4524 else if (priv->config & CFG_BACKGROUND_SCAN
4525 && priv->status & STATUS_ASSOCIATED)
4526 queue_delayed_work(priv->workqueue,
4527 &priv->request_scan, HZ);
4528 break;
4531 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4532 struct notif_frag_length *x = &notif->u.frag_len;
4534 if (notif->size == sizeof(*x))
4535 IPW_ERROR("Frag length: %d\n",
4536 le16_to_cpu(x->frag_length));
4537 else
4538 IPW_ERROR("Frag length of wrong size %d "
4539 "(should be %zd)\n",
4540 notif->size, sizeof(*x));
4541 break;
4544 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4545 struct notif_link_deterioration *x =
4546 &notif->u.link_deterioration;
4548 if (notif->size == sizeof(*x)) {
4549 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4550 "link deterioration: '%s' " MAC_FMT
4551 " \n", escape_essid(priv->essid,
4552 priv->essid_len),
4553 MAC_ARG(priv->bssid));
4554 memcpy(&priv->last_link_deterioration, x,
4555 sizeof(*x));
4556 } else {
4557 IPW_ERROR("Link Deterioration of wrong size %d "
4558 "(should be %zd)\n",
4559 notif->size, sizeof(*x));
4561 break;
4564 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4565 IPW_ERROR("Dino config\n");
4566 if (priv->hcmd
4567 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4568 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4570 break;
4573 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4574 struct notif_beacon_state *x = &notif->u.beacon_state;
4575 if (notif->size != sizeof(*x)) {
4576 IPW_ERROR
4577 ("Beacon state of wrong size %d (should "
4578 "be %zd)\n", notif->size, sizeof(*x));
4579 break;
4582 if (le32_to_cpu(x->state) ==
4583 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4584 ipw_handle_missed_beacon(priv,
4585 le32_to_cpu(x->
4586 number));
4588 break;
4591 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4592 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4593 if (notif->size == sizeof(*x)) {
4594 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4595 "0x%02x station %d\n",
4596 x->key_state, x->security_type,
4597 x->station_index);
4598 break;
4601 IPW_ERROR
4602 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4603 notif->size, sizeof(*x));
4604 break;
4607 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4608 struct notif_calibration *x = &notif->u.calibration;
4610 if (notif->size == sizeof(*x)) {
4611 memcpy(&priv->calib, x, sizeof(*x));
4612 IPW_DEBUG_INFO("TODO: Calibration\n");
4613 break;
4616 IPW_ERROR
4617 ("Calibration of wrong size %d (should be %zd)\n",
4618 notif->size, sizeof(*x));
4619 break;
4622 case HOST_NOTIFICATION_NOISE_STATS:{
4623 if (notif->size == sizeof(u32)) {
4624 priv->last_noise =
4625 (u8) (le32_to_cpu(notif->u.noise.value) &
4626 0xff);
4627 average_add(&priv->average_noise,
4628 priv->last_noise);
4629 break;
4632 IPW_ERROR
4633 ("Noise stat is wrong size %d (should be %zd)\n",
4634 notif->size, sizeof(u32));
4635 break;
4638 default:
4639 IPW_DEBUG_NOTIF("Unknown notification: "
4640 "subtype=%d,flags=0x%2x,size=%d\n",
4641 notif->subtype, notif->flags, notif->size);
4646 * Destroys all DMA structures and initializes them again
4648 * @param priv
4649 * @return error code
4651 static int ipw_queue_reset(struct ipw_priv *priv)
4653 int rc = 0;
4654 /** @todo customize queue sizes */
4655 int nTx = 64, nTxCmd = 8;
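/* Sizing sketch: the calls below rebuild one 8-entry host-command queue
 * (txq_cmd) and four 64-entry data queues (txq[0..3]); nTx and nTxCmd above
 * are the only size knobs and are not yet runtime-tunable (see the @todo). */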
4656 ipw_tx_queue_free(priv);
4657 /* Tx CMD queue */
4658 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4659 IPW_TX_CMD_QUEUE_READ_INDEX,
4660 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4661 IPW_TX_CMD_QUEUE_BD_BASE,
4662 IPW_TX_CMD_QUEUE_BD_SIZE);
4663 if (rc) {
4664 IPW_ERROR("Tx Cmd queue init failed\n");
4665 goto error;
4667 /* Tx queue(s) */
4668 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4669 IPW_TX_QUEUE_0_READ_INDEX,
4670 IPW_TX_QUEUE_0_WRITE_INDEX,
4671 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4672 if (rc) {
4673 IPW_ERROR("Tx 0 queue init failed\n");
4674 goto error;
4676 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4677 IPW_TX_QUEUE_1_READ_INDEX,
4678 IPW_TX_QUEUE_1_WRITE_INDEX,
4679 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4680 if (rc) {
4681 IPW_ERROR("Tx 1 queue init failed\n");
4682 goto error;
4684 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4685 IPW_TX_QUEUE_2_READ_INDEX,
4686 IPW_TX_QUEUE_2_WRITE_INDEX,
4687 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4688 if (rc) {
4689 IPW_ERROR("Tx 2 queue init failed\n");
4690 goto error;
4692 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4693 IPW_TX_QUEUE_3_READ_INDEX,
4694 IPW_TX_QUEUE_3_WRITE_INDEX,
4695 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4696 if (rc) {
4697 IPW_ERROR("Tx 3 queue init failed\n");
4698 goto error;
4700 /* statistics */
4701 priv->rx_bufs_min = 0;
4702 priv->rx_pend_max = 0;
4703 return rc;
4705 error:
4706 ipw_tx_queue_free(priv);
4707 return rc;
4711 * Reclaim Tx queue entries no more used by NIC.
4713 * When FW advances 'R' index, all entries between old and
4714 * new 'R' index need to be reclaimed. As a result, some free space
4715 * forms. If there is enough free space (> low mark), wake Tx queue.
4717 * @note Need to protect against garbage in 'R' index
4718 * @param priv
4719 * @param txq
4720 * @param qindex
4721 * @return Number of used entries remaining in the queue
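/* Illustrative wrap-around example: with n_bd = 64, last_used = 60 and
 * first_empty = 4, the raw difference 4 - 60 = -56 is corrected to
 * -56 + 64 = 8 entries still outstanding in the queue. */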
4723 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4724 struct clx2_tx_queue *txq, int qindex)
4726 u32 hw_tail;
4727 int used;
4728 struct clx2_queue *q = &txq->q;
4730 hw_tail = ipw_read32(priv, q->reg_r);
4731 if (hw_tail >= q->n_bd) {
4732 IPW_ERROR
4733 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4734 hw_tail, q->n_bd);
4735 goto done;
4737 for (; q->last_used != hw_tail;
4738 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4739 ipw_queue_tx_free_tfd(priv, txq);
4740 priv->tx_packets++;
4742 done:
4743 if ((ipw_queue_space(q) > q->low_mark) &&
4744 (qindex >= 0) &&
4745 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4746 netif_wake_queue(priv->net_dev);
4747 used = q->first_empty - q->last_used;
4748 if (used < 0)
4749 used += q->n_bd;
4751 return used;
4754 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4755 int len, int sync)
4757 struct clx2_tx_queue *txq = &priv->txq_cmd;
4758 struct clx2_queue *q = &txq->q;
4759 struct tfd_frame *tfd;
4761 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4762 IPW_ERROR("No space for Tx\n");
4763 return -EBUSY;
4766 tfd = &txq->bd[q->first_empty];
4767 txq->txb[q->first_empty] = NULL;
4769 memset(tfd, 0, sizeof(*tfd));
4770 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4771 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4772 priv->hcmd_seq++;
4773 tfd->u.cmd.index = hcmd;
4774 tfd->u.cmd.length = len;
4775 memcpy(tfd->u.cmd.payload, buf, len);
4776 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4777 ipw_write32(priv, q->reg_w, q->first_empty);
4778 _ipw_read32(priv, 0x90);
4780 return 0;
4784 * Rx theory of operation
4786 * The host allocates 32 DMA target addresses and passes the host address
4787 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4788 * 0 to 31
4790 * Rx Queue Indexes
4791 * The host/firmware share two index registers for managing the Rx buffers.
4793 * The READ index maps to the first position that the firmware may be writing
4794 * to -- the driver can read up to (but not including) this position and get
4795 * good data.
4796 * The READ index is managed by the firmware once the card is enabled.
4798 * The WRITE index maps to the last position the driver has read from -- the
4799 * position preceding WRITE is the last slot the firmware can place a packet.
4801 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4802 * WRITE = READ.
4804 * During initialization the host sets up the READ queue position to the first
4805 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4807 * When the firmware places a packet in a buffer it will advance the READ index
4808 * and fire the RX interrupt. The driver can then query the READ index and
4809 * process as many packets as possible, moving the WRITE index forward as it
4810 * resets the Rx queue buffers with new memory.
4812 * The management in the driver is as follows:
4813 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4814 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4815 * to replenish the ipw->rxq->rx_free.
4816 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4817 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4818 * 'processed' and 'read' driver indexes as well)
4819 * + A received packet is processed and handed to the kernel network stack,
4820 * detached from the ipw->rxq. The driver 'processed' index is updated.
4821 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4822 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4823 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4824 * were enough free buffers and RX_STALLED is set it is cleared.
4827 * Driver sequence:
4829 * ipw_rx_queue_alloc() Allocates rx_free
4830 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4831 * ipw_rx_queue_restock
4832 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4833 * queue, updates firmware pointers, and updates
4834 * the WRITE index. If insufficient rx_free buffers
4835 * are available, schedules ipw_rx_queue_replenish
4837 * -- enable interrupts --
4838 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4839 * READ INDEX, detaching the SKB from the pool.
4840 * Moves the packet buffer from queue to rx_used.
4841 * Calls ipw_rx_queue_restock to refill any empty
4842 * slots.
4843 * ...
4848 * If there are slots in the RX queue that need to be restocked,
4849 * and we have free pre-allocated buffers, fill the ranks as much
4850 * as we can, pulling from rx_free.
4852 * This moves the 'write' index forward to catch up with 'processed', and
4853 * also updates the memory address in the firmware to reference the new
4854 * target buffer.
4856 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4858 struct ipw_rx_queue *rxq = priv->rxq;
4859 struct list_head *element;
4860 struct ipw_rx_mem_buffer *rxb;
4861 unsigned long flags;
4862 int write;
4864 spin_lock_irqsave(&rxq->lock, flags);
4865 write = rxq->write;
4866 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4867 element = rxq->rx_free.next;
4868 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4869 list_del(element);
4871 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4872 rxb->dma_addr);
4873 rxq->queue[rxq->write] = rxb;
4874 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4875 rxq->free_count--;
4877 spin_unlock_irqrestore(&rxq->lock, flags);
4879 /* If the pre-allocated buffer pool is dropping low, schedule to
4880 * refill it */
4881 if (rxq->free_count <= RX_LOW_WATERMARK)
4882 queue_work(priv->workqueue, &priv->rx_replenish);
4884 /* If we've added more space for the firmware to place data, tell it */
4885 if (write != rxq->write)
4886 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4890 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
4891 * Also restock the Rx queue via ipw_rx_queue_restock.
4893 * This is called as a scheduled work item (except during initialization)
4895 static void ipw_rx_queue_replenish(void *data)
4897 struct ipw_priv *priv = data;
4898 struct ipw_rx_queue *rxq = priv->rxq;
4899 struct list_head *element;
4900 struct ipw_rx_mem_buffer *rxb;
4901 unsigned long flags;
4903 spin_lock_irqsave(&rxq->lock, flags);
4904 while (!list_empty(&rxq->rx_used)) {
4905 element = rxq->rx_used.next;
4906 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4907 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4908 if (!rxb->skb) {
4909 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4910 priv->net_dev->name);
4911 /* We don't reschedule replenish work here -- we will
4912 * call the restock method and if it still needs
4913 * more buffers it will schedule replenish */
4914 break;
4916 list_del(element);
4918 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4919 rxb->dma_addr =
4920 pci_map_single(priv->pci_dev, rxb->skb->data,
4921 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4923 list_add_tail(&rxb->list, &rxq->rx_free);
4924 rxq->free_count++;
4926 spin_unlock_irqrestore(&rxq->lock, flags);
4928 ipw_rx_queue_restock(priv);
4931 static void ipw_bg_rx_queue_replenish(void *data)
4933 struct ipw_priv *priv = data;
4934 mutex_lock(&priv->mutex);
4935 ipw_rx_queue_replenish(data);
4936 mutex_unlock(&priv->mutex);
4939 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4940 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4941 * This free routine walks the list of POOL entries and, if SKB is set to
4942 * non-NULL, it is unmapped and freed.
4944 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4946 int i;
4948 if (!rxq)
4949 return;
4951 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4952 if (rxq->pool[i].skb != NULL) {
4953 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4954 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4955 dev_kfree_skb(rxq->pool[i].skb);
4959 kfree(rxq);
4962 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4964 struct ipw_rx_queue *rxq;
4965 int i;
4967 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4968 if (unlikely(!rxq)) {
4969 IPW_ERROR("memory allocation failed\n");
4970 return NULL;
4972 spin_lock_init(&rxq->lock);
4973 INIT_LIST_HEAD(&rxq->rx_free);
4974 INIT_LIST_HEAD(&rxq->rx_used);
4976 /* Fill the rx_used queue with _all_ of the Rx buffers */
4977 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4978 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4980 /* Set us so that we have processed and used all buffers, but have
4981 * not restocked the Rx queue with fresh buffers */
4982 rxq->read = rxq->write = 0;
4983 rxq->processed = RX_QUEUE_SIZE - 1;
4984 rxq->free_count = 0;
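/* With write = 0 and processed = RX_QUEUE_SIZE - 1, the first call to
 * ipw_rx_queue_restock() sees write != processed and keeps stocking slots
 * until either the ring is full or rx_free runs dry -- exactly the
 * "processed and used everything, restocked nothing" state described above. */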
4986 return rxq;
4989 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4991 rate &= ~IEEE80211_BASIC_RATE_MASK;
4992 if (ieee_mode == IEEE_A) {
4993 switch (rate) {
4994 case IEEE80211_OFDM_RATE_6MB:
4995 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4996 1 : 0;
4997 case IEEE80211_OFDM_RATE_9MB:
4998 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4999 1 : 0;
5000 case IEEE80211_OFDM_RATE_12MB:
5001 return priv->
5002 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5003 case IEEE80211_OFDM_RATE_18MB:
5004 return priv->
5005 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5006 case IEEE80211_OFDM_RATE_24MB:
5007 return priv->
5008 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5009 case IEEE80211_OFDM_RATE_36MB:
5010 return priv->
5011 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5012 case IEEE80211_OFDM_RATE_48MB:
5013 return priv->
5014 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5015 case IEEE80211_OFDM_RATE_54MB:
5016 return priv->
5017 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5018 default:
5019 return 0;
5023 /* B and G mixed */
5024 switch (rate) {
5025 case IEEE80211_CCK_RATE_1MB:
5026 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5027 case IEEE80211_CCK_RATE_2MB:
5028 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5029 case IEEE80211_CCK_RATE_5MB:
5030 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5031 case IEEE80211_CCK_RATE_11MB:
5032 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5035 /* If we are limited to B modulations, bail at this point */
5036 if (ieee_mode == IEEE_B)
5037 return 0;
5039 /* G */
5040 switch (rate) {
5041 case IEEE80211_OFDM_RATE_6MB:
5042 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5043 case IEEE80211_OFDM_RATE_9MB:
5044 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5045 case IEEE80211_OFDM_RATE_12MB:
5046 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5047 case IEEE80211_OFDM_RATE_18MB:
5048 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5049 case IEEE80211_OFDM_RATE_24MB:
5050 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5051 case IEEE80211_OFDM_RATE_36MB:
5052 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5053 case IEEE80211_OFDM_RATE_48MB:
5054 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5055 case IEEE80211_OFDM_RATE_54MB:
5056 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5059 return 0;
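/* Build the intersection of the rates advertised by 'network' and the
 * driver's configured rates_mask.  Rates the AP flags as basic (mandatory)
 * are kept even when the mask would exclude them, since an AP's mandatory
 * rates must be supported to associate; any other masked rate is skipped with
 * a debug message.  Always returns 1. */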
5062 static int ipw_compatible_rates(struct ipw_priv *priv,
5063 const struct ieee80211_network *network,
5064 struct ipw_supported_rates *rates)
5066 int num_rates, i;
5068 memset(rates, 0, sizeof(*rates));
5069 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5070 rates->num_rates = 0;
5071 for (i = 0; i < num_rates; i++) {
5072 if (!ipw_is_rate_in_mask(priv, network->mode,
5073 network->rates[i])) {
5075 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5076 IPW_DEBUG_SCAN("Adding masked mandatory "
5077 "rate %02X\n",
5078 network->rates[i]);
5079 rates->supported_rates[rates->num_rates++] =
5080 network->rates[i];
5081 continue;
5084 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5085 network->rates[i], priv->rates_mask);
5086 continue;
5089 rates->supported_rates[rates->num_rates++] = network->rates[i];
5092 num_rates = min(network->rates_ex_len,
5093 (u8) (IPW_MAX_RATES - num_rates));
5094 for (i = 0; i < num_rates; i++) {
5095 if (!ipw_is_rate_in_mask(priv, network->mode,
5096 network->rates_ex[i])) {
5097 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5098 IPW_DEBUG_SCAN("Adding masked mandatory "
5099 "rate %02X\n",
5100 network->rates_ex[i]);
5101 rates->supported_rates[rates->num_rates++] =
5102 network->rates_ex[i];
5103 continue;
5106 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5107 network->rates_ex[i], priv->rates_mask);
5108 continue;
5111 rates->supported_rates[rates->num_rates++] =
5112 network->rates_ex[i];
5115 return 1;
5118 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5119 const struct ipw_supported_rates *src)
5121 u8 i;
5122 for (i = 0; i < src->num_rates; i++)
5123 dest->supported_rates[i] = src->supported_rates[i];
5124 dest->num_rates = src->num_rates;
5127 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5128 * mask should ever be used -- right now all callers to add the scan rates are
5129 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5130 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5131 u8 modulation, u32 rate_mask)
5133 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5134 IEEE80211_BASIC_RATE_MASK : 0;
5136 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5137 rates->supported_rates[rates->num_rates++] =
5138 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5140 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5141 rates->supported_rates[rates->num_rates++] =
5142 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5144 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5145 rates->supported_rates[rates->num_rates++] = basic_mask |
5146 IEEE80211_CCK_RATE_5MB;
5148 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5149 rates->supported_rates[rates->num_rates++] = basic_mask |
5150 IEEE80211_CCK_RATE_11MB;
5153 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5154 u8 modulation, u32 rate_mask)
5156 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5157 IEEE80211_BASIC_RATE_MASK : 0;
5159 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5160 rates->supported_rates[rates->num_rates++] = basic_mask |
5161 IEEE80211_OFDM_RATE_6MB;
5163 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5164 rates->supported_rates[rates->num_rates++] =
5165 IEEE80211_OFDM_RATE_9MB;
5167 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5168 rates->supported_rates[rates->num_rates++] = basic_mask |
5169 IEEE80211_OFDM_RATE_12MB;
5171 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5172 rates->supported_rates[rates->num_rates++] =
5173 IEEE80211_OFDM_RATE_18MB;
5175 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5176 rates->supported_rates[rates->num_rates++] = basic_mask |
5177 IEEE80211_OFDM_RATE_24MB;
5179 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5180 rates->supported_rates[rates->num_rates++] =
5181 IEEE80211_OFDM_RATE_36MB;
5183 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5184 rates->supported_rates[rates->num_rates++] =
5185 IEEE80211_OFDM_RATE_48MB;
5187 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5188 rates->supported_rates[rates->num_rates++] =
5189 IEEE80211_OFDM_RATE_54MB;
5192 struct ipw_network_match {
5193 struct ieee80211_network *network;
5194 struct ipw_supported_rates rates;
5197 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5198 struct ipw_network_match *match,
5199 struct ieee80211_network *network,
5200 int roaming)
5202 struct ipw_supported_rates rates;
5204 /* Verify that this network's capability is compatible with the
5205 * current mode (AdHoc or Infrastructure) */
5206 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5207 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5208 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5209 "capability mismatch.\n",
5210 escape_essid(network->ssid, network->ssid_len),
5211 MAC_ARG(network->bssid));
5212 return 0;
5215 /* If we do not have an ESSID for this AP, we can not associate with
5216 * it */
5217 if (network->flags & NETWORK_EMPTY_ESSID) {
5218 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5219 "because of hidden ESSID.\n",
5220 escape_essid(network->ssid, network->ssid_len),
5221 MAC_ARG(network->bssid));
5222 return 0;
5225 if (unlikely(roaming)) {
5226 /* If we are roaming, then check that this is a valid
5227 * network to try to roam to */
5228 if ((network->ssid_len != match->network->ssid_len) ||
5229 memcmp(network->ssid, match->network->ssid,
5230 network->ssid_len)) {
5231 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5232 "because of non-network ESSID.\n",
5233 escape_essid(network->ssid,
5234 network->ssid_len),
5235 MAC_ARG(network->bssid));
5236 return 0;
5238 } else {
5239 /* If an ESSID has been configured then compare the broadcast
5240 * ESSID to ours */
5241 if ((priv->config & CFG_STATIC_ESSID) &&
5242 ((network->ssid_len != priv->essid_len) ||
5243 memcmp(network->ssid, priv->essid,
5244 min(network->ssid_len, priv->essid_len)))) {
5245 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5247 strncpy(escaped,
5248 escape_essid(network->ssid, network->ssid_len),
5249 sizeof(escaped));
5250 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5251 "because of ESSID mismatch: '%s'.\n",
5252 escaped, MAC_ARG(network->bssid),
5253 escape_essid(priv->essid,
5254 priv->essid_len));
5255 return 0;
5259 /* If this network's TSF timestamp is lower than the current match's,
5260 * don't bother testing everything else. */
5262 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5263 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5264 "current network.\n",
5265 escape_essid(match->network->ssid,
5266 match->network->ssid_len));
5267 return 0;
5268 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5269 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5270 "current network.\n",
5271 escape_essid(match->network->ssid,
5272 match->network->ssid_len));
5273 return 0;
5276 /* Now go through and see if the requested network is valid... */
5277 if (priv->ieee->scan_age != 0 &&
5278 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5279 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5280 "because of age: %ums.\n",
5281 escape_essid(network->ssid, network->ssid_len),
5282 MAC_ARG(network->bssid),
5283 jiffies_to_msecs(jiffies -
5284 network->last_scanned));
5285 return 0;
5288 if ((priv->config & CFG_STATIC_CHANNEL) &&
5289 (network->channel != priv->channel)) {
5290 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5291 "because of channel mismatch: %d != %d.\n",
5292 escape_essid(network->ssid, network->ssid_len),
5293 MAC_ARG(network->bssid),
5294 network->channel, priv->channel);
5295 return 0;
5298 /* Verify privacy compatibility */
5299 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5300 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5301 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5302 "because of privacy mismatch: %s != %s.\n",
5303 escape_essid(network->ssid, network->ssid_len),
5304 MAC_ARG(network->bssid),
5305 priv->
5306 capability & CAP_PRIVACY_ON ? "on" : "off",
5307 network->
5308 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5309 "off");
5310 return 0;
5313 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5314 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5315 "because of the same BSSID match: " MAC_FMT
5316 ".\n", escape_essid(network->ssid,
5317 network->ssid_len),
5318 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5319 return 0;
5322 /* Filter out any incompatible freq / mode combinations */
5323 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5324 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5325 "because of invalid frequency/mode "
5326 "combination.\n",
5327 escape_essid(network->ssid, network->ssid_len),
5328 MAC_ARG(network->bssid));
5329 return 0;
5332 /* Ensure that the rates supported by the driver are compatible with
5333 * this AP, including verification of basic rates (mandatory) */
5334 if (!ipw_compatible_rates(priv, network, &rates)) {
5335 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5336 "because configured rate mask excludes "
5337 "AP mandatory rate.\n",
5338 escape_essid(network->ssid, network->ssid_len),
5339 MAC_ARG(network->bssid));
5340 return 0;
5343 if (rates.num_rates == 0) {
5344 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5345 "because of no compatible rates.\n",
5346 escape_essid(network->ssid, network->ssid_len),
5347 MAC_ARG(network->bssid));
5348 return 0;
5351 /* TODO: Perform any further minimal comparative tests. We do not
5352 * want to put too much policy logic here; intelligent scan selection
5353 * should occur within a generic IEEE 802.11 user space tool. */
5355 /* Set up 'new' AP to this network */
5356 ipw_copy_rates(&match->rates, &rates);
5357 match->network = network;
5358 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5359 escape_essid(network->ssid, network->ssid_len),
5360 MAC_ARG(network->bssid));
5362 return 1;
5365 static void ipw_merge_adhoc_network(void *data)
5367 struct ipw_priv *priv = data;
5368 struct ieee80211_network *network = NULL;
5369 struct ipw_network_match match = {
5370 .network = priv->assoc_network
5373 if ((priv->status & STATUS_ASSOCIATED) &&
5374 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5375 /* First pass through ROAM process -- look for a better
5376 * network */
5377 unsigned long flags;
5379 spin_lock_irqsave(&priv->ieee->lock, flags);
5380 list_for_each_entry(network, &priv->ieee->network_list, list) {
5381 if (network != priv->assoc_network)
5382 ipw_find_adhoc_network(priv, &match, network,
5385 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5387 if (match.network == priv->assoc_network) {
5388 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5389 "merge to.\n");
5390 return;
5393 mutex_lock(&priv->mutex);
5394 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5395 IPW_DEBUG_MERGE("remove network %s\n",
5396 escape_essid(priv->essid,
5397 priv->essid_len));
5398 ipw_remove_current_network(priv);
5401 ipw_disassociate(priv);
5402 priv->assoc_network = match.network;
5403 mutex_unlock(&priv->mutex);
5404 return;
5408 static int ipw_best_network(struct ipw_priv *priv,
5409 struct ipw_network_match *match,
5410 struct ieee80211_network *network, int roaming)
5412 struct ipw_supported_rates rates;
5414 /* Verify that this network's capability is compatible with the
5415 * current mode (AdHoc or Infrastructure) */
5416 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5417 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5418 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5419 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5420 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5421 "capability mismatch.\n",
5422 escape_essid(network->ssid, network->ssid_len),
5423 MAC_ARG(network->bssid));
5424 return 0;
5427 /* If we do not have an ESSID for this AP, we can not associate with
5428 * it */
5429 if (network->flags & NETWORK_EMPTY_ESSID) {
5430 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5431 "because of hidden ESSID.\n",
5432 escape_essid(network->ssid, network->ssid_len),
5433 MAC_ARG(network->bssid));
5434 return 0;
5437 if (unlikely(roaming)) {
5438 /* If we are roaming, then check that this is a valid
5439 * network to try to roam to */
5440 if ((network->ssid_len != match->network->ssid_len) ||
5441 memcmp(network->ssid, match->network->ssid,
5442 network->ssid_len)) {
5443 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5444 "because of non-network ESSID.\n",
5445 escape_essid(network->ssid,
5446 network->ssid_len),
5447 MAC_ARG(network->bssid));
5448 return 0;
5450 } else {
5451 /* If an ESSID has been configured then compare the broadcast
5452 * ESSID to ours */
5453 if ((priv->config & CFG_STATIC_ESSID) &&
5454 ((network->ssid_len != priv->essid_len) ||
5455 memcmp(network->ssid, priv->essid,
5456 min(network->ssid_len, priv->essid_len)))) {
5457 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5458 strncpy(escaped,
5459 escape_essid(network->ssid, network->ssid_len),
5460 sizeof(escaped));
5461 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5462 "because of ESSID mismatch: '%s'.\n",
5463 escaped, MAC_ARG(network->bssid),
5464 escape_essid(priv->essid,
5465 priv->essid_len));
5466 return 0;
5470 /* If the current match has a stronger signal than this network,
5471 * don't bother testing everything else. */
5472 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5473 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5474 strncpy(escaped,
5475 escape_essid(network->ssid, network->ssid_len),
5476 sizeof(escaped));
5477 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5478 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5479 escaped, MAC_ARG(network->bssid),
5480 escape_essid(match->network->ssid,
5481 match->network->ssid_len),
5482 MAC_ARG(match->network->bssid));
5483 return 0;
5486 /* If this network has already had an association attempt within the
5487 * last 3 seconds, do not try to associate again... */
5488 if (network->last_associate &&
5489 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5490 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5491 "because of storming (%ums since last "
5492 "assoc attempt).\n",
5493 escape_essid(network->ssid, network->ssid_len),
5494 MAC_ARG(network->bssid),
5495 jiffies_to_msecs(jiffies -
5496 network->last_associate));
5497 return 0;
5500 /* Now go through and see if the requested network is valid... */
5501 if (priv->ieee->scan_age != 0 &&
5502 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5503 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5504 "because of age: %ums.\n",
5505 escape_essid(network->ssid, network->ssid_len),
5506 MAC_ARG(network->bssid),
5507 jiffies_to_msecs(jiffies -
5508 network->last_scanned));
5509 return 0;
5512 if ((priv->config & CFG_STATIC_CHANNEL) &&
5513 (network->channel != priv->channel)) {
5514 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5515 "because of channel mismatch: %d != %d.\n",
5516 escape_essid(network->ssid, network->ssid_len),
5517 MAC_ARG(network->bssid),
5518 network->channel, priv->channel);
5519 return 0;
5522 /* Verify privacy compatibility */
5523 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5524 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5525 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5526 "because of privacy mismatch: %s != %s.\n",
5527 escape_essid(network->ssid, network->ssid_len),
5528 MAC_ARG(network->bssid),
5529 priv->capability & CAP_PRIVACY_ON ? "on" :
5530 "off",
5531 network->capability &
5532 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5533 return 0;
5536 if (priv->ieee->wpa_enabled &&
5537 network->wpa_ie_len == 0 && network->rsn_ie_len == 0) {
5538 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5539 "because of WPA capability mismatch.\n",
5540 escape_essid(network->ssid, network->ssid_len),
5541 MAC_ARG(network->bssid));
5542 return 0;
5545 if ((priv->config & CFG_STATIC_BSSID) &&
5546 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5548 "because of BSSID mismatch: " MAC_FMT ".\n",
5549 escape_essid(network->ssid, network->ssid_len),
5550 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5551 return 0;
5554 /* Filter out any incompatible freq / mode combinations */
5555 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5556 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5557 "because of invalid frequency/mode "
5558 "combination.\n",
5559 escape_essid(network->ssid, network->ssid_len),
5560 MAC_ARG(network->bssid));
5561 return 0;
5564 /* Filter out invalid channel in current GEO */
5565 if (!ipw_is_valid_channel(priv->ieee, network->channel)) {
5566 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5567 "because of invalid channel in current GEO\n",
5568 escape_essid(network->ssid, network->ssid_len),
5569 MAC_ARG(network->bssid));
5570 return 0;
5573 /* Ensure that the rates supported by the driver are compatible with
5574 * this AP, including verification of basic rates (mandatory) */
5575 if (!ipw_compatible_rates(priv, network, &rates)) {
5576 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5577 "because configured rate mask excludes "
5578 "AP mandatory rate.\n",
5579 escape_essid(network->ssid, network->ssid_len),
5580 MAC_ARG(network->bssid));
5581 return 0;
5584 if (rates.num_rates == 0) {
5585 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5586 "because of no compatible rates.\n",
5587 escape_essid(network->ssid, network->ssid_len),
5588 MAC_ARG(network->bssid));
5589 return 0;
5592 /* TODO: Perform any further minimal comparative tests. We do not
5593 * want to put too much policy logic here; intelligent scan selection
5594 * should occur within a generic IEEE 802.11 user space tool. */
5596 /* Set up 'new' AP to this network */
5597 ipw_copy_rates(&match->rates, &rates);
5598 match->network = network;
5600 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5601 escape_essid(network->ssid, network->ssid_len),
5602 MAC_ARG(network->bssid));
5604 return 1;
5607 static void ipw_adhoc_create(struct ipw_priv *priv,
5608 struct ieee80211_network *network)
5610 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
5611 int i;
5614 * For the purposes of scanning, we can set our wireless mode
5615 * to trigger scans across combinations of bands, but when it
5616 * comes to creating a new ad-hoc network, we have to tell the FW
5617 * exactly which band to use.
5619 * We also have the possibility of an invalid channel for the
5620 * chosen band. Attempting to create a new ad-hoc network
5621 * with an invalid channel for wireless mode will trigger a
5622 * FW fatal error.
5625 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
5626 case IEEE80211_52GHZ_BAND:
5627 network->mode = IEEE_A;
5628 i = ipw_channel_to_index(priv->ieee, priv->channel);
5629 if (i == -1)
5630 BUG();
5631 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5632 IPW_WARNING("Overriding invalid channel\n");
5633 priv->channel = geo->a[0].channel;
5635 break;
5637 case IEEE80211_24GHZ_BAND:
5638 if (priv->ieee->mode & IEEE_G)
5639 network->mode = IEEE_G;
5640 else
5641 network->mode = IEEE_B;
5642 i = ipw_channel_to_index(priv->ieee, priv->channel);
5643 if (i == -1)
5644 BUG();
5645 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5646 IPW_WARNING("Overriding invalid channel\n");
5647 priv->channel = geo->bg[0].channel;
5649 break;
5651 default:
5652 IPW_WARNING("Overriding invalid channel\n");
5653 if (priv->ieee->mode & IEEE_A) {
5654 network->mode = IEEE_A;
5655 priv->channel = geo->a[0].channel;
5656 } else if (priv->ieee->mode & IEEE_G) {
5657 network->mode = IEEE_G;
5658 priv->channel = geo->bg[0].channel;
5659 } else {
5660 network->mode = IEEE_B;
5661 priv->channel = geo->bg[0].channel;
5663 break;
5666 network->channel = priv->channel;
5667 priv->config |= CFG_ADHOC_PERSIST;
5668 ipw_create_bssid(priv, network->bssid);
5669 network->ssid_len = priv->essid_len;
5670 memcpy(network->ssid, priv->essid, priv->essid_len);
5671 memset(&network->stats, 0, sizeof(network->stats));
5672 network->capability = WLAN_CAPABILITY_IBSS;
5673 if (!(priv->config & CFG_PREAMBLE_LONG))
5674 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5675 if (priv->capability & CAP_PRIVACY_ON)
5676 network->capability |= WLAN_CAPABILITY_PRIVACY;
5677 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5678 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5679 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5680 memcpy(network->rates_ex,
5681 &priv->rates.supported_rates[network->rates_len],
5682 network->rates_ex_len);
5683 network->last_scanned = 0;
5684 network->flags = 0;
5685 network->last_associate = 0;
5686 network->time_stamp[0] = 0;
5687 network->time_stamp[1] = 0;
5688 network->beacon_interval = 100; /* Default */
5689 network->listen_interval = 10; /* Default */
5690 network->atim_window = 0; /* Default */
5691 network->wpa_ie_len = 0;
5692 network->rsn_ie_len = 0;
5695 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5697 struct ipw_tgi_tx_key key;
5699 if (!(priv->ieee->sec.flags & (1 << index)))
5700 return;
5702 key.key_id = index;
5703 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5704 key.security_type = type;
5705 key.station_index = 0; /* always 0 for BSS */
5706 key.flags = 0;
5707 /* 0 for new key; previous value of counter (after fatal error) */
5708 key.tx_counter[0] = 0;
5709 key.tx_counter[1] = 0;
5711 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5714 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5716 struct ipw_wep_key key;
5717 int i;
5719 key.cmd_id = DINO_CMD_WEP_KEY;
5720 key.seq_num = 0;
5722 /* Note: AES keys cannot be set multiple times;
5723 * only set them the first time. */
5724 for (i = 0; i < 4; i++) {
5725 key.key_index = i | type;
5726 if (!(priv->ieee->sec.flags & (1 << i))) {
5727 key.key_size = 0;
5728 continue;
5731 key.key_size = priv->ieee->sec.key_sizes[i];
5732 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5734 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
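/* The two helpers below choose between firmware and host decryption per
 * security level; both bail out early when host encryption is in use.
 * Reading the switches: levels 1 (WEP) and 3 (CCMP) leave unicast/multicast
 * decryption to the firmware, while level 2 (TKIP) disables it and, for
 * unicast, forces host_decrypt -- presumably because the firmware cannot
 * handle TKIP decryption itself.  Level 0 (no security) also disables the
 * hardware decryption paths. */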
5738 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5740 if (priv->ieee->host_encrypt)
5741 return;
5743 switch (level) {
5744 case SEC_LEVEL_3:
5745 priv->sys_config.disable_unicast_decryption = 0;
5746 priv->ieee->host_decrypt = 0;
5747 break;
5748 case SEC_LEVEL_2:
5749 priv->sys_config.disable_unicast_decryption = 1;
5750 priv->ieee->host_decrypt = 1;
5751 break;
5752 case SEC_LEVEL_1:
5753 priv->sys_config.disable_unicast_decryption = 0;
5754 priv->ieee->host_decrypt = 0;
5755 break;
5756 case SEC_LEVEL_0:
5757 priv->sys_config.disable_unicast_decryption = 1;
5758 break;
5759 default:
5760 break;
5764 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5766 if (priv->ieee->host_encrypt)
5767 return;
5769 switch (level) {
5770 case SEC_LEVEL_3:
5771 priv->sys_config.disable_multicast_decryption = 0;
5772 break;
5773 case SEC_LEVEL_2:
5774 priv->sys_config.disable_multicast_decryption = 1;
5775 break;
5776 case SEC_LEVEL_1:
5777 priv->sys_config.disable_multicast_decryption = 0;
5778 break;
5779 case SEC_LEVEL_0:
5780 priv->sys_config.disable_multicast_decryption = 1;
5781 break;
5782 default:
5783 break;
5787 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5789 switch (priv->ieee->sec.level) {
5790 case SEC_LEVEL_3:
5791 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5792 ipw_send_tgi_tx_key(priv,
5793 DCT_FLAG_EXT_SECURITY_CCM,
5794 priv->ieee->sec.active_key);
5796 if (!priv->ieee->host_mc_decrypt)
5797 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5798 break;
5799 case SEC_LEVEL_2:
5800 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5801 ipw_send_tgi_tx_key(priv,
5802 DCT_FLAG_EXT_SECURITY_TKIP,
5803 priv->ieee->sec.active_key);
5804 break;
5805 case SEC_LEVEL_1:
5806 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5807 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5808 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5809 break;
5810 case SEC_LEVEL_0:
5811 default:
5812 break;
5816 static void ipw_adhoc_check(void *data)
5818 struct ipw_priv *priv = data;
5820 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5821 !(priv->config & CFG_ADHOC_PERSIST)) {
5822 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5823 IPW_DL_STATE | IPW_DL_ASSOC,
5824 "Missed beacon: %d - disassociate\n",
5825 priv->missed_adhoc_beacons);
5826 ipw_remove_current_network(priv);
5827 ipw_disassociate(priv);
5828 return;
5831 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5832 priv->assoc_request.beacon_interval);
5835 static void ipw_bg_adhoc_check(void *data)
5837 struct ipw_priv *priv = data;
5838 mutex_lock(&priv->mutex);
5839 ipw_adhoc_check(data);
5840 mutex_unlock(&priv->mutex);
5843 #ifdef CONFIG_IPW2200_DEBUG
5844 static void ipw_debug_config(struct ipw_priv *priv)
5846 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5847 "[CFG 0x%08X]\n", priv->config);
5848 if (priv->config & CFG_STATIC_CHANNEL)
5849 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5850 else
5851 IPW_DEBUG_INFO("Channel unlocked.\n");
5852 if (priv->config & CFG_STATIC_ESSID)
5853 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5854 escape_essid(priv->essid, priv->essid_len));
5855 else
5856 IPW_DEBUG_INFO("ESSID unlocked.\n");
5857 if (priv->config & CFG_STATIC_BSSID)
5858 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5859 MAC_ARG(priv->bssid));
5860 else
5861 IPW_DEBUG_INFO("BSSID unlocked.\n");
5862 if (priv->capability & CAP_PRIVACY_ON)
5863 IPW_DEBUG_INFO("PRIVACY on\n");
5864 else
5865 IPW_DEBUG_INFO("PRIVACY off\n");
5866 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5868 #else
5869 #define ipw_debug_config(x) do {} while (0)
5870 #endif
5872 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5874 /* TODO: Verify that this works... */
5875 struct ipw_fixed_rate fr = {
5876 .tx_rates = priv->rates_mask
5878 u32 reg;
5879 u16 mask = 0;
5881 /* Identify 'current FW band' and match it with the fixed
5882 * Tx rates */
5884 switch (priv->ieee->freq_band) {
5885 case IEEE80211_52GHZ_BAND: /* A only */
5886 /* IEEE_A */
5887 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5888 /* Invalid fixed rate mask */
5889 IPW_DEBUG_WX
5890 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5891 fr.tx_rates = 0;
5892 break;
5895 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5896 break;
5898 default: /* 2.4Ghz or Mixed */
5899 /* IEEE_B */
5900 if (mode == IEEE_B) {
5901 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5902 /* Invalid fixed rate mask */
5903 IPW_DEBUG_WX
5904 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5905 fr.tx_rates = 0;
5907 break;
5910 /* IEEE_G */
5911 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5912 IEEE80211_OFDM_RATES_MASK)) {
5913 /* Invalid fixed rate mask */
5914 IPW_DEBUG_WX
5915 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5916 fr.tx_rates = 0;
5917 break;
5920 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5921 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5922 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5925 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5926 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5927 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5930 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5931 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5932 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5935 fr.tx_rates |= mask;
5936 break;
5939 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5940 ipw_write_reg32(priv, reg, *(u32 *) & fr);
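/* The resulting ipw_fixed_rate structure is written as a single 32-bit word
 * to the firmware's fixed-rate override area (address read from
 * IPW_MEM_FIXED_OVERRIDE above).  For A-only operation the OFDM rates are
 * shifted down to the A-band positions; for G operation the 6/9/12 Mb OFDM
 * bits are shifted down by one, apparently so they pack next to the CCK
 * rates in the layout the firmware expects (the TODO above suggests this
 * mapping has not been fully verified). */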
5943 static void ipw_abort_scan(struct ipw_priv *priv)
5945 int err;
5947 if (priv->status & STATUS_SCAN_ABORTING) {
5948 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5949 return;
5951 priv->status |= STATUS_SCAN_ABORTING;
5953 err = ipw_send_scan_abort(priv);
5954 if (err)
5955 IPW_DEBUG_HC("Request to abort scan failed.\n");
5958 static void ipw_add_scan_channels(struct ipw_priv *priv,
5959 struct ipw_scan_request_ext *scan,
5960 int scan_type)
5962 int channel_index = 0;
5963 const struct ieee80211_geo *geo;
5964 int i;
5966 geo = ipw_get_geo(priv->ieee);
5968 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5969 int start = channel_index;
5970 for (i = 0; i < geo->a_channels; i++) {
5971 if ((priv->status & STATUS_ASSOCIATED) &&
5972 geo->a[i].channel == priv->channel)
5973 continue;
5974 channel_index++;
5975 scan->channels_list[channel_index] = geo->a[i].channel;
5976 ipw_set_scan_type(scan, channel_index,
5977 geo->a[i].
5978 flags & IEEE80211_CH_PASSIVE_ONLY ?
5979 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
5980 scan_type);
5983 if (start != channel_index) {
5984 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
5985 (channel_index - start);
5986 channel_index++;
5990 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5991 int start = channel_index;
5992 if (priv->config & CFG_SPEED_SCAN) {
5993 int index;
5994 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
5995 /* nop out the list */
5996 [0] = 0
5999 u8 channel;
6000 while (channel_index < IPW_SCAN_CHANNELS) {
6001 channel =
6002 priv->speed_scan[priv->speed_scan_pos];
6003 if (channel == 0) {
6004 priv->speed_scan_pos = 0;
6005 channel = priv->speed_scan[0];
6007 if ((priv->status & STATUS_ASSOCIATED) &&
6008 channel == priv->channel) {
6009 priv->speed_scan_pos++;
6010 continue;
6013 /* If this channel has already been
6014 * added to the scan, break from the loop;
6015 * it will be the first channel in
6016 * the next scan.

6018 if (channels[channel - 1] != 0)
6019 break;
6021 channels[channel - 1] = 1;
6022 priv->speed_scan_pos++;
6023 channel_index++;
6024 scan->channels_list[channel_index] = channel;
6025 index =
6026 ipw_channel_to_index(priv->ieee, channel);
6027 ipw_set_scan_type(scan, channel_index,
6028 geo->bg[index].
6029 flags &
6030 IEEE80211_CH_PASSIVE_ONLY ?
6031 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6032 : scan_type);
6034 } else {
6035 for (i = 0; i < geo->bg_channels; i++) {
6036 if ((priv->status & STATUS_ASSOCIATED) &&
6037 geo->bg[i].channel == priv->channel)
6038 continue;
6039 channel_index++;
6040 scan->channels_list[channel_index] =
6041 geo->bg[i].channel;
6042 ipw_set_scan_type(scan, channel_index,
6043 geo->bg[i].
6044 flags &
6045 IEEE80211_CH_PASSIVE_ONLY ?
6046 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6047 : scan_type);
6051 if (start != channel_index) {
6052 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6053 (channel_index - start);
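/* Note on the channels_list layout built above: each band is emitted as a
 * group whose first byte encodes (mode << 6) | number-of-channels
 * (IPW_A_MODE for 5.2GHz, IPW_B_MODE for 2.4GHz), followed by the channel
 * numbers themselves -- for example a 2.4GHz group scanning channels 1, 6
 * and 11 would look like { (IPW_B_MODE << 6) | 3, 1, 6, 11 }.  Channels
 * flagged IEEE80211_CH_PASSIVE_ONLY are forced to a passive full-dwell
 * scan, and the channel we are currently associated on is skipped. */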
6058 static int ipw_request_scan(struct ipw_priv *priv)
6060 struct ipw_scan_request_ext scan;
6061 int err = 0, scan_type;
6063 if (!(priv->status & STATUS_INIT) ||
6064 (priv->status & STATUS_EXIT_PENDING))
6065 return 0;
6067 mutex_lock(&priv->mutex);
6069 if (priv->status & STATUS_SCANNING) {
6070 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6071 priv->status |= STATUS_SCAN_PENDING;
6072 goto done;
6075 if (!(priv->status & STATUS_SCAN_FORCED) &&
6076 priv->status & STATUS_SCAN_ABORTING) {
6077 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6078 priv->status |= STATUS_SCAN_PENDING;
6079 goto done;
6082 if (priv->status & STATUS_RF_KILL_MASK) {
6083 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6084 priv->status |= STATUS_SCAN_PENDING;
6085 goto done;
6088 memset(&scan, 0, sizeof(scan));
6090 if (priv->config & CFG_SPEED_SCAN)
6091 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6092 cpu_to_le16(30);
6093 else
6094 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6095 cpu_to_le16(20);
6097 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6098 cpu_to_le16(20);
6099 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6101 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6103 #ifdef CONFIG_IPW2200_MONITOR
6104 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6105 u8 channel;
6106 u8 band = 0;
6108 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
6109 case IEEE80211_52GHZ_BAND:
6110 band = (u8) (IPW_A_MODE << 6) | 1;
6111 channel = priv->channel;
6112 break;
6114 case IEEE80211_24GHZ_BAND:
6115 band = (u8) (IPW_B_MODE << 6) | 1;
6116 channel = priv->channel;
6117 break;
6119 default:
6120 band = (u8) (IPW_B_MODE << 6) | 1;
6121 channel = 9;
6122 break;
6125 scan.channels_list[0] = band;
6126 scan.channels_list[1] = channel;
6127 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6129 /* NOTE: The card will sit on this channel for this time
6130 * period. Scan aborts are timing sensitive and frequently
6131 * result in firmware restarts. As such, it is best to
6132 * set a small dwell_time here and just keep re-issuing
6133 * scans. Otherwise fast channel hopping will not actually
6134 * hop channels.
6136 * TODO: Move SPEED SCAN support to all modes and bands */
6137 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6138 cpu_to_le16(2000);
6139 } else {
6140 #endif /* CONFIG_IPW2200_MONITOR */
6141 /* If we are roaming, then make this a directed scan for the
6142 * current network. Otherwise, ensure that every other scan
6143 * is a fast channel hop scan */
6144 if ((priv->status & STATUS_ROAMING)
6145 || (!(priv->status & STATUS_ASSOCIATED)
6146 && (priv->config & CFG_STATIC_ESSID)
6147 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6148 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6149 if (err) {
6150 IPW_DEBUG_HC("Attempt to send SSID command "
6151 "failed.\n");
6152 goto done;
6155 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6156 } else
6157 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6159 ipw_add_scan_channels(priv, &scan, scan_type);
6160 #ifdef CONFIG_IPW2200_MONITOR
6162 #endif
6164 err = ipw_send_scan_request_ext(priv, &scan);
6165 if (err) {
6166 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6167 goto done;
6170 priv->status |= STATUS_SCANNING;
6171 priv->status &= ~STATUS_SCAN_PENDING;
6172 queue_delayed_work(priv->workqueue, &priv->scan_check,
6173 IPW_SCAN_CHECK_WATCHDOG);
6174 done:
6175 mutex_unlock(&priv->mutex);
6176 return err;
6179 static void ipw_bg_abort_scan(void *data)
6181 struct ipw_priv *priv = data;
6182 mutex_lock(&priv->mutex);
6183 ipw_abort_scan(data);
6184 mutex_unlock(&priv->mutex);
6187 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6189 /* This is called when wpa_supplicant loads and closes the driver
6190 * interface. */
6191 priv->ieee->wpa_enabled = value;
6192 return 0;
6195 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6197 struct ieee80211_device *ieee = priv->ieee;
6198 struct ieee80211_security sec = {
6199 .flags = SEC_AUTH_MODE,
6201 int ret = 0;
6203 if (value & IW_AUTH_ALG_SHARED_KEY) {
6204 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6205 ieee->open_wep = 0;
6206 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6207 sec.auth_mode = WLAN_AUTH_OPEN;
6208 ieee->open_wep = 1;
6209 } else if (value & IW_AUTH_ALG_LEAP) {
6210 sec.auth_mode = WLAN_AUTH_LEAP;
6211 ieee->open_wep = 1;
6212 } else
6213 return -EINVAL;
6215 if (ieee->set_security)
6216 ieee->set_security(ieee->dev, &sec);
6217 else
6218 ret = -EOPNOTSUPP;
6220 return ret;
6223 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6224 int wpa_ie_len)
6226 /* make sure WPA is enabled */
6227 ipw_wpa_enable(priv, 1);
6229 ipw_disassociate(priv);
6232 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6233 char *capabilities, int length)
6235 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6237 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6238 capabilities);
6242 * WE-18 support
6245 /* SIOCSIWGENIE */
6246 static int ipw_wx_set_genie(struct net_device *dev,
6247 struct iw_request_info *info,
6248 union iwreq_data *wrqu, char *extra)
6250 struct ipw_priv *priv = ieee80211_priv(dev);
6251 struct ieee80211_device *ieee = priv->ieee;
6252 u8 *buf;
6253 int err = 0;
6255 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6256 (wrqu->data.length && extra == NULL))
6257 return -EINVAL;
6259 //mutex_lock(&priv->mutex);
6261 //if (!ieee->wpa_enabled) {
6262 // err = -EOPNOTSUPP;
6263 // goto out;
6266 if (wrqu->data.length) {
6267 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6268 if (buf == NULL) {
6269 err = -ENOMEM;
6270 goto out;
6273 memcpy(buf, extra, wrqu->data.length);
6274 kfree(ieee->wpa_ie);
6275 ieee->wpa_ie = buf;
6276 ieee->wpa_ie_len = wrqu->data.length;
6277 } else {
6278 kfree(ieee->wpa_ie);
6279 ieee->wpa_ie = NULL;
6280 ieee->wpa_ie_len = 0;
6283 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6284 out:
6285 //mutex_unlock(&priv->mutex);
6286 return err;
6289 /* SIOCGIWGENIE */
6290 static int ipw_wx_get_genie(struct net_device *dev,
6291 struct iw_request_info *info,
6292 union iwreq_data *wrqu, char *extra)
6294 struct ipw_priv *priv = ieee80211_priv(dev);
6295 struct ieee80211_device *ieee = priv->ieee;
6296 int err = 0;
6298 //mutex_lock(&priv->mutex);
6300 //if (!ieee->wpa_enabled) {
6301 // err = -EOPNOTSUPP;
6302 // goto out;
6305 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6306 wrqu->data.length = 0;
6307 goto out;
6310 if (wrqu->data.length < ieee->wpa_ie_len) {
6311 err = -E2BIG;
6312 goto out;
6315 wrqu->data.length = ieee->wpa_ie_len;
6316 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6318 out:
6319 //mutex_unlock(&priv->mutex);
6320 return err;
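/* Translate a WEXT cipher selection (IW_AUTH_CIPHER_*) into the driver's
 * internal SEC_LEVEL_* value used by the hardware decrypt helpers:
 * none -> 0, WEP40/WEP104 -> 1, TKIP -> 2, CCMP -> 3; anything else
 * maps to -1. */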
6323 static int wext_cipher2level(int cipher)
6325 switch (cipher) {
6326 case IW_AUTH_CIPHER_NONE:
6327 return SEC_LEVEL_0;
6328 case IW_AUTH_CIPHER_WEP40:
6329 case IW_AUTH_CIPHER_WEP104:
6330 return SEC_LEVEL_1;
6331 case IW_AUTH_CIPHER_TKIP:
6332 return SEC_LEVEL_2;
6333 case IW_AUTH_CIPHER_CCMP:
6334 return SEC_LEVEL_3;
6335 default:
6336 return -1;
6340 /* SIOCSIWAUTH */
6341 static int ipw_wx_set_auth(struct net_device *dev,
6342 struct iw_request_info *info,
6343 union iwreq_data *wrqu, char *extra)
6345 struct ipw_priv *priv = ieee80211_priv(dev);
6346 struct ieee80211_device *ieee = priv->ieee;
6347 struct iw_param *param = &wrqu->param;
6348 struct ieee80211_crypt_data *crypt;
6349 unsigned long flags;
6350 int ret = 0;
6352 switch (param->flags & IW_AUTH_INDEX) {
6353 case IW_AUTH_WPA_VERSION:
6354 break;
6355 case IW_AUTH_CIPHER_PAIRWISE:
6356 ipw_set_hw_decrypt_unicast(priv,
6357 wext_cipher2level(param->value));
6358 break;
6359 case IW_AUTH_CIPHER_GROUP:
6360 ipw_set_hw_decrypt_multicast(priv,
6361 wext_cipher2level(param->value));
6362 break;
6363 case IW_AUTH_KEY_MGMT:
6365 * ipw2200 does not use these parameters
6367 break;
6369 case IW_AUTH_TKIP_COUNTERMEASURES:
6370 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6371 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6372 break;
6374 flags = crypt->ops->get_flags(crypt->priv);
6376 if (param->value)
6377 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6378 else
6379 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6381 crypt->ops->set_flags(flags, crypt->priv);
6383 break;
6385 case IW_AUTH_DROP_UNENCRYPTED:{
6386 /* HACK:
6388 * wpa_supplicant calls set_wpa_enabled when the driver
6389 * is loaded and unloaded, regardless of if WPA is being
6390 * used. No other calls are made which can be used to
6391 * determine if encryption will be used or not prior to
6392 * association being expected. If encryption is not being
6393 * used, drop_unencrypted is set to false, else true -- we
6394 * can use this to determine if the CAP_PRIVACY_ON bit should
6395 * be set.
6397 struct ieee80211_security sec = {
6398 .flags = SEC_ENABLED,
6399 .enabled = param->value,
6401 priv->ieee->drop_unencrypted = param->value;
6402 /* We only change SEC_LEVEL for open mode. Others
6403 * are set by ipw_wpa_set_encryption.
6405 if (!param->value) {
6406 sec.flags |= SEC_LEVEL;
6407 sec.level = SEC_LEVEL_0;
6408 } else {
6409 sec.flags |= SEC_LEVEL;
6410 sec.level = SEC_LEVEL_1;
6412 if (priv->ieee->set_security)
6413 priv->ieee->set_security(priv->ieee->dev, &sec);
6414 break;
6417 case IW_AUTH_80211_AUTH_ALG:
6418 ret = ipw_wpa_set_auth_algs(priv, param->value);
6419 break;
6421 case IW_AUTH_WPA_ENABLED:
6422 ret = ipw_wpa_enable(priv, param->value);
6423 break;
6425 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6426 ieee->ieee802_1x = param->value;
6427 break;
6429 //case IW_AUTH_ROAMING_CONTROL:
6430 case IW_AUTH_PRIVACY_INVOKED:
6431 ieee->privacy_invoked = param->value;
6432 break;
6434 default:
6435 return -EOPNOTSUPP;
6437 return ret;
6440 /* SIOCGIWAUTH */
6441 static int ipw_wx_get_auth(struct net_device *dev,
6442 struct iw_request_info *info,
6443 union iwreq_data *wrqu, char *extra)
6445 struct ipw_priv *priv = ieee80211_priv(dev);
6446 struct ieee80211_device *ieee = priv->ieee;
6447 struct ieee80211_crypt_data *crypt;
6448 struct iw_param *param = &wrqu->param;
6449 int ret = 0;
6451 switch (param->flags & IW_AUTH_INDEX) {
6452 case IW_AUTH_WPA_VERSION:
6453 case IW_AUTH_CIPHER_PAIRWISE:
6454 case IW_AUTH_CIPHER_GROUP:
6455 case IW_AUTH_KEY_MGMT:
6457 * wpa_supplicant will control these internally
6459 ret = -EOPNOTSUPP;
6460 break;
6462 case IW_AUTH_TKIP_COUNTERMEASURES:
6463 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6464 if (!crypt || !crypt->ops->get_flags)
6465 break;
6467 param->value = (crypt->ops->get_flags(crypt->priv) &
6468 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6470 break;
6472 case IW_AUTH_DROP_UNENCRYPTED:
6473 param->value = ieee->drop_unencrypted;
6474 break;
6476 case IW_AUTH_80211_AUTH_ALG:
6477 param->value = ieee->sec.auth_mode;
6478 break;
6480 case IW_AUTH_WPA_ENABLED:
6481 param->value = ieee->wpa_enabled;
6482 break;
6484 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6485 param->value = ieee->ieee802_1x;
6486 break;
6488 case IW_AUTH_ROAMING_CONTROL:
6489 case IW_AUTH_PRIVACY_INVOKED:
6490 param->value = ieee->privacy_invoked;
6491 break;
6493 default:
6494 return -EOPNOTSUPP;
6496 return 0;
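/* When the hwcrypto module parameter is set, the handler below splits the
 * crypto work between host and firmware: TKIP group keys force multicast
 * decryption back onto the host, and TKIP pairwise keys keep MSDU
 * encryption and decryption on the host because the hardware cannot build
 * the Michael MIC, while WEP and CCMP are offloaded entirely. */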
6499 /* SIOCSIWENCODEEXT */
6500 static int ipw_wx_set_encodeext(struct net_device *dev,
6501 struct iw_request_info *info,
6502 union iwreq_data *wrqu, char *extra)
6504 struct ipw_priv *priv = ieee80211_priv(dev);
6505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6507 if (hwcrypto) {
6508 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6509 /* IPW HW can't build TKIP MIC,
6510 host decryption still needed */
6511 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6512 priv->ieee->host_mc_decrypt = 1;
6513 else {
6514 priv->ieee->host_encrypt = 0;
6515 priv->ieee->host_encrypt_msdu = 1;
6516 priv->ieee->host_decrypt = 1;
6518 } else {
6519 priv->ieee->host_encrypt = 0;
6520 priv->ieee->host_encrypt_msdu = 0;
6521 priv->ieee->host_decrypt = 0;
6522 priv->ieee->host_mc_decrypt = 0;
6526 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6529 /* SIOCGIWENCODEEXT */
6530 static int ipw_wx_get_encodeext(struct net_device *dev,
6531 struct iw_request_info *info,
6532 union iwreq_data *wrqu, char *extra)
6534 struct ipw_priv *priv = ieee80211_priv(dev);
6535 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6538 /* SIOCSIWMLME */
6539 static int ipw_wx_set_mlme(struct net_device *dev,
6540 struct iw_request_info *info,
6541 union iwreq_data *wrqu, char *extra)
6543 struct ipw_priv *priv = ieee80211_priv(dev);
6544 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6545 u16 reason;
6547 reason = cpu_to_le16(mlme->reason_code);
6549 switch (mlme->cmd) {
6550 case IW_MLME_DEAUTH:
6551 // silently ignore
6552 break;
6554 case IW_MLME_DISASSOC:
6555 ipw_disassociate(priv);
6556 break;
6558 default:
6559 return -EOPNOTSUPP;
6561 return 0;
6564 #ifdef CONFIG_IPW_QOS
6566 /* QoS */
6568 * get the modulation type of the current network or
6569 * the card's current mode
6571 u8 ipw_qos_current_mode(struct ipw_priv * priv)
6573 u8 mode = 0;
6575 if (priv->status & STATUS_ASSOCIATED) {
6576 unsigned long flags;
6578 spin_lock_irqsave(&priv->ieee->lock, flags);
6579 mode = priv->assoc_network->mode;
6580 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6581 } else {
6582 mode = priv->ieee->mode;
6584 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6585 return mode;
6589 * Handle management frame beacon and probe response
6591 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6592 int active_network,
6593 struct ieee80211_network *network)
6595 u32 size = sizeof(struct ieee80211_qos_parameters);
6597 if (network->capability & WLAN_CAPABILITY_IBSS)
6598 network->qos_data.active = network->qos_data.supported;
6600 if (network->flags & NETWORK_HAS_QOS_MASK) {
6601 if (active_network &&
6602 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6603 network->qos_data.active = network->qos_data.supported;
6605 if ((network->qos_data.active == 1) && (active_network == 1) &&
6606 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6607 (network->qos_data.old_param_count !=
6608 network->qos_data.param_count)) {
6609 network->qos_data.old_param_count =
6610 network->qos_data.param_count;
6611 schedule_work(&priv->qos_activate);
6612 IPW_DEBUG_QOS("QoS parameters changed, calling "
6613 "qos_activate\n");
6615 } else {
6616 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6617 memcpy(&network->qos_data.parameters,
6618 &def_parameters_CCK, size);
6619 else
6620 memcpy(&network->qos_data.parameters,
6621 &def_parameters_OFDM, size);
6623 if ((network->qos_data.active == 1) && (active_network == 1)) {
6624 IPW_DEBUG_QOS("QoS was disabled, calling qos_activate\n");
6625 schedule_work(&priv->qos_activate);
6628 network->qos_data.active = 0;
6629 network->qos_data.supported = 0;
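/* Ad-hoc cell merging: if we are associated in IBSS mode and this
 * beacon/probe response came from a different BSSID that advertises an
 * IBSS with our exact SSID, the cell has split, so queue the
 * merge_networks work to rejoin it. */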
6631 if ((priv->status & STATUS_ASSOCIATED) &&
6632 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6633 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6634 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6635 !(network->flags & NETWORK_EMPTY_ESSID))
6636 if ((network->ssid_len ==
6637 priv->assoc_network->ssid_len) &&
6638 !memcmp(network->ssid,
6639 priv->assoc_network->ssid,
6640 network->ssid_len)) {
6641 queue_work(priv->workqueue,
6642 &priv->merge_networks);
6646 return 0;
6650 * This function sets up the firmware to support QoS. It sends
6651 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6653 static int ipw_qos_activate(struct ipw_priv *priv,
6654 struct ieee80211_qos_data *qos_network_data)
6656 int err;
6657 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6658 struct ieee80211_qos_parameters *active_one = NULL;
6659 u32 size = sizeof(struct ieee80211_qos_parameters);
6660 u32 burst_duration;
6661 int i;
6662 u8 type;
6664 type = ipw_qos_current_mode(priv);
6666 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6667 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6668 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6669 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6671 if (qos_network_data == NULL) {
6672 if (type == IEEE_B) {
6673 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6674 active_one = &def_parameters_CCK;
6675 } else
6676 active_one = &def_parameters_OFDM;
6678 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6679 burst_duration = ipw_qos_get_burst_duration(priv);
6680 for (i = 0; i < QOS_QUEUE_NUM; i++)
6681 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6682 (u16) burst_duration;
6683 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6684 if (type == IEEE_B) {
6685 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6686 type);
6687 if (priv->qos_data.qos_enable == 0)
6688 active_one = &def_parameters_CCK;
6689 else
6690 active_one = priv->qos_data.def_qos_parm_CCK;
6691 } else {
6692 if (priv->qos_data.qos_enable == 0)
6693 active_one = &def_parameters_OFDM;
6694 else
6695 active_one = priv->qos_data.def_qos_parm_OFDM;
6697 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6698 } else {
6699 unsigned long flags;
6700 int active;
6702 spin_lock_irqsave(&priv->ieee->lock, flags);
6703 active_one = &(qos_network_data->parameters);
6704 qos_network_data->old_param_count =
6705 qos_network_data->param_count;
6706 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6707 active = qos_network_data->supported;
6708 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6710 if (active == 0) {
6711 burst_duration = ipw_qos_get_burst_duration(priv);
6712 for (i = 0; i < QOS_QUEUE_NUM; i++)
6713 qos_parameters[QOS_PARAM_SET_ACTIVE].
6714 tx_op_limit[i] = (u16) burst_duration;
6718 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6719 err = ipw_send_qos_params_command(priv,
6720 (struct ieee80211_qos_parameters *)
6721 &(qos_parameters[0]));
6722 if (err)
6723 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6725 return err;
6729 * send IPW_CMD_WME_INFO to the firmware
6731 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6733 int ret = 0;
6734 struct ieee80211_qos_information_element qos_info;
6736 if (priv == NULL)
6737 return -1;
6739 qos_info.elementID = QOS_ELEMENT_ID;
6740 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6742 qos_info.version = QOS_VERSION_1;
6743 qos_info.ac_info = 0;
6745 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6746 qos_info.qui_type = QOS_OUI_TYPE;
6747 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6749 ret = ipw_send_qos_info_command(priv, &qos_info);
6750 if (ret != 0) {
6751 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6753 return ret;
6757 * Set the QoS parameters for the association request structure
6759 static int ipw_qos_association(struct ipw_priv *priv,
6760 struct ieee80211_network *network)
6762 int err = 0;
6763 struct ieee80211_qos_data *qos_data = NULL;
6764 struct ieee80211_qos_data ibss_data = {
6765 .supported = 1,
6766 .active = 1,
6769 switch (priv->ieee->iw_mode) {
6770 case IW_MODE_ADHOC:
6771 if (!(network->capability & WLAN_CAPABILITY_IBSS))
6772 BUG();
6774 qos_data = &ibss_data;
6775 break;
6777 case IW_MODE_INFRA:
6778 qos_data = &network->qos_data;
6779 break;
6781 default:
6782 BUG();
6783 break;
6786 err = ipw_qos_activate(priv, qos_data);
6787 if (err) {
6788 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6789 return err;
6792 if (priv->qos_data.qos_enable && qos_data->supported) {
6793 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6794 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6795 return ipw_qos_set_info_element(priv);
6798 return 0;
6802 * handle the beacon responses. If we receive a QoS setting from the
6803 * network that differs from the setting negotiated at association,
6804 * adjust the QoS setting.
6806 static int ipw_qos_association_resp(struct ipw_priv *priv,
6807 struct ieee80211_network *network)
6809 int ret = 0;
6810 unsigned long flags;
6811 u32 size = sizeof(struct ieee80211_qos_parameters);
6812 int set_qos_param = 0;
6814 if ((priv == NULL) || (network == NULL) ||
6815 (priv->assoc_network == NULL))
6816 return ret;
6818 if (!(priv->status & STATUS_ASSOCIATED))
6819 return ret;
6821 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6822 return ret;
6824 spin_lock_irqsave(&priv->ieee->lock, flags);
6825 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6826 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6827 sizeof(struct ieee80211_qos_data));
6828 priv->assoc_network->qos_data.active = 1;
6829 if ((network->qos_data.old_param_count !=
6830 network->qos_data.param_count)) {
6831 set_qos_param = 1;
6832 network->qos_data.old_param_count =
6833 network->qos_data.param_count;
6836 } else {
6837 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6838 memcpy(&priv->assoc_network->qos_data.parameters,
6839 &def_parameters_CCK, size);
6840 else
6841 memcpy(&priv->assoc_network->qos_data.parameters,
6842 &def_parameters_OFDM, size);
6843 priv->assoc_network->qos_data.active = 0;
6844 priv->assoc_network->qos_data.supported = 0;
6845 set_qos_param = 1;
6848 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6850 if (set_qos_param == 1)
6851 schedule_work(&priv->qos_activate);
6853 return ret;
6856 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6858 u32 ret = 0;
6860 if ((priv == NULL))
6861 return 0;
6863 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6864 ret = priv->qos_data.burst_duration_CCK;
6865 else
6866 ret = priv->qos_data.burst_duration_OFDM;
6868 return ret;
6872 * Initialize the global QoS settings
6874 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6875 int burst_enable, u32 burst_duration_CCK,
6876 u32 burst_duration_OFDM)
6878 priv->qos_data.qos_enable = enable;
6880 if (priv->qos_data.qos_enable) {
6881 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6882 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6883 IPW_DEBUG_QOS("QoS is enabled\n");
6884 } else {
6885 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6886 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6887 IPW_DEBUG_QOS("QoS is not enabled\n");
6890 priv->qos_data.burst_enable = burst_enable;
6892 if (burst_enable) {
6893 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6894 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6895 } else {
6896 priv->qos_data.burst_duration_CCK = 0;
6897 priv->qos_data.burst_duration_OFDM = 0;
6902 * map the packet priority to the right TX Queue
6904 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6906 if (priority > 7 || !priv->qos_data.qos_enable)
6907 priority = 0;
6909 return from_priority_to_tx_queue[priority] - 1;
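/* from_priority_to_tx_queue[] (defined elsewhere in this driver) converts
 * a packet priority -- presumably the 802.1d user priority -- into a
 * 1-based firmware queue number, hence the -1 above.  Priorities outside
 * 0-7, or any priority while QoS is disabled, fall back to priority 0,
 * i.e. the default best-effort queue. */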
6913 * add the QoS parameters to the TX command
6915 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6916 u16 priority,
6917 struct tfd_data *tfd, u8 unicast)
6919 int ret = 0;
6920 int tx_queue_id = 0;
6921 struct ieee80211_qos_data *qos_data = NULL;
6922 int active, supported;
6923 unsigned long flags;
6925 if (!(priv->status & STATUS_ASSOCIATED))
6926 return 0;
6928 qos_data = &priv->assoc_network->qos_data;
6930 spin_lock_irqsave(&priv->ieee->lock, flags);
6932 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6933 if (unicast == 0)
6934 qos_data->active = 0;
6935 else
6936 qos_data->active = qos_data->supported;
6939 active = qos_data->active;
6940 supported = qos_data->supported;
6942 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6944 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6945 "unicast %d\n",
6946 priv->qos_data.qos_enable, active, supported, unicast);
6947 if (active && priv->qos_data.qos_enable) {
6948 ret = from_priority_to_tx_queue[priority];
6949 tx_queue_id = ret - 1;
6950 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
6951 if (priority <= 7) {
6952 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6953 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
6954 tfd->tfd.tfd_26.mchdr.frame_ctl |=
6955 IEEE80211_STYPE_QOS_DATA;
6957 if (priv->qos_data.qos_no_ack_mask &
6958 (1UL << tx_queue_id)) {
6959 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6960 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
6961 CTRL_QOS_NO_ACK;
6966 return ret;
6970 * background worker to run the QoS activate functionality
6972 static void ipw_bg_qos_activate(void *data)
6974 struct ipw_priv *priv = data;
6976 if (priv == NULL)
6977 return;
6979 mutex_lock(&priv->mutex);
6981 if (priv->status & STATUS_ASSOCIATED)
6982 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6984 mutex_unlock(&priv->mutex);
6987 static int ipw_handle_probe_response(struct net_device *dev,
6988 struct ieee80211_probe_response *resp,
6989 struct ieee80211_network *network)
6991 struct ipw_priv *priv = ieee80211_priv(dev);
6992 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6993 (network == priv->assoc_network));
6995 ipw_qos_handle_probe_response(priv, active_network, network);
6997 return 0;
7000 static int ipw_handle_beacon(struct net_device *dev,
7001 struct ieee80211_beacon *resp,
7002 struct ieee80211_network *network)
7004 struct ipw_priv *priv = ieee80211_priv(dev);
7005 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7006 (network == priv->assoc_network));
7008 ipw_qos_handle_probe_response(priv, active_network, network);
7010 return 0;
7013 static int ipw_handle_assoc_response(struct net_device *dev,
7014 struct ieee80211_assoc_response *resp,
7015 struct ieee80211_network *network)
7017 struct ipw_priv *priv = ieee80211_priv(dev);
7018 ipw_qos_association_resp(priv, network);
7019 return 0;
7022 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7023 *qos_param)
7025 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7026 sizeof(*qos_param) * 3, qos_param);
7029 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7030 *qos_param)
7032 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7033 qos_param);
7036 #endif /* CONFIG_IPW_QOS */
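/* ipw_associate_network drives a single association attempt: it applies
 * any fixed rate override, fills in priv->assoc_request (channel, auth
 * type, IEEE mode, preamble, capability, BSSID and TSF), then sends the
 * SSID, supported rates, system config and sensitivity commands before
 * finally issuing the associate (or reassociate / IBSS-start) command. */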
7038 static int ipw_associate_network(struct ipw_priv *priv,
7039 struct ieee80211_network *network,
7040 struct ipw_supported_rates *rates, int roaming)
7042 int err;
7044 if (priv->config & CFG_FIXED_RATE)
7045 ipw_set_fixed_rate(priv, network->mode);
7047 if (!(priv->config & CFG_STATIC_ESSID)) {
7048 priv->essid_len = min(network->ssid_len,
7049 (u8) IW_ESSID_MAX_SIZE);
7050 memcpy(priv->essid, network->ssid, priv->essid_len);
7053 network->last_associate = jiffies;
7055 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7056 priv->assoc_request.channel = network->channel;
7057 priv->assoc_request.auth_key = 0;
7059 if ((priv->capability & CAP_PRIVACY_ON) &&
7060 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7061 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7062 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7064 if (priv->ieee->sec.level == SEC_LEVEL_1)
7065 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7067 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7068 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7069 priv->assoc_request.auth_type = AUTH_LEAP;
7070 else
7071 priv->assoc_request.auth_type = AUTH_OPEN;
7073 if (priv->ieee->wpa_ie_len) {
7074 priv->assoc_request.policy_support = 0x02; /* RSN active */
7075 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7076 priv->ieee->wpa_ie_len);
7080 * It is valid for our ieee device to support multiple modes, but
7081 * when it comes to associating to a given network we have to choose
7082 * just one mode.
7084 if (network->mode & priv->ieee->mode & IEEE_A)
7085 priv->assoc_request.ieee_mode = IPW_A_MODE;
7086 else if (network->mode & priv->ieee->mode & IEEE_G)
7087 priv->assoc_request.ieee_mode = IPW_G_MODE;
7088 else if (network->mode & priv->ieee->mode & IEEE_B)
7089 priv->assoc_request.ieee_mode = IPW_B_MODE;
7091 priv->assoc_request.capability = network->capability;
7092 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7093 && !(priv->config & CFG_PREAMBLE_LONG)) {
7094 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7095 } else {
7096 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7098 /* Clear the short preamble if we won't be supporting it */
7099 priv->assoc_request.capability &=
7100 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7103 /* Clear capability bits that aren't used in Ad Hoc */
7104 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7105 priv->assoc_request.capability &=
7106 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7108 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7109 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7110 roaming ? "Rea" : "A",
7111 escape_essid(priv->essid, priv->essid_len),
7112 network->channel,
7113 ipw_modes[priv->assoc_request.ieee_mode],
7114 rates->num_rates,
7115 (priv->assoc_request.preamble_length ==
7116 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7117 network->capability &
7118 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7119 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7120 priv->capability & CAP_PRIVACY_ON ?
7121 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7122 "(open)") : "",
7123 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7124 priv->capability & CAP_PRIVACY_ON ?
7125 '1' + priv->ieee->sec.active_key : '.',
7126 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7128 priv->assoc_request.beacon_interval = network->beacon_interval;
7129 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7130 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7131 priv->assoc_request.assoc_type = HC_IBSS_START;
7132 priv->assoc_request.assoc_tsf_msw = 0;
7133 priv->assoc_request.assoc_tsf_lsw = 0;
7134 } else {
7135 if (unlikely(roaming))
7136 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7137 else
7138 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7139 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7140 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7143 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7145 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7146 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7147 priv->assoc_request.atim_window = network->atim_window;
7148 } else {
7149 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7150 priv->assoc_request.atim_window = 0;
7153 priv->assoc_request.listen_interval = network->listen_interval;
7155 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7156 if (err) {
7157 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7158 return err;
7161 rates->ieee_mode = priv->assoc_request.ieee_mode;
7162 rates->purpose = IPW_RATE_CONNECT;
7163 ipw_send_supported_rates(priv, rates);
7165 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7166 priv->sys_config.dot11g_auto_detection = 1;
7167 else
7168 priv->sys_config.dot11g_auto_detection = 0;
7170 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7171 priv->sys_config.answer_broadcast_ssid_probe = 1;
7172 else
7173 priv->sys_config.answer_broadcast_ssid_probe = 0;
7175 err = ipw_send_system_config(priv, &priv->sys_config);
7176 if (err) {
7177 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7178 return err;
7181 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7182 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7183 if (err) {
7184 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7185 return err;
7189 * If preemption is enabled, it is possible for the association
7190 * to complete before we return from ipw_send_associate. Therefore
7191 * we have to be sure to update our private data first.
7193 priv->channel = network->channel;
7194 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7195 priv->status |= STATUS_ASSOCIATING;
7196 priv->status &= ~STATUS_SECURITY_UPDATED;
7198 priv->assoc_network = network;
7200 #ifdef CONFIG_IPW_QOS
7201 ipw_qos_association(priv, network);
7202 #endif
7204 err = ipw_send_associate(priv, &priv->assoc_request);
7205 if (err) {
7206 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7207 return err;
7210 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7211 escape_essid(priv->essid, priv->essid_len),
7212 MAC_ARG(priv->bssid));
7214 return 0;
7217 static void ipw_roam(void *data)
7219 struct ipw_priv *priv = data;
7220 struct ieee80211_network *network = NULL;
7221 struct ipw_network_match match = {
7222 .network = priv->assoc_network
7225 /* The roaming process is as follows:
7227 * 1. Missed beacon threshold triggers the roaming process by
7228 * setting the status ROAM bit and requesting a scan.
7229 * 2. When the scan completes, it schedules the ROAM work
7230 * 3. The ROAM work looks at all of the known networks for one that
7231 * is a better network than the currently associated. If none
7232 * found, the ROAM process is over (ROAM bit cleared)
7233 * 4. If a better network is found, a disassociation request is
7234 * sent.
7235 * 5. When the disassociation completes, the roam work is again
7236 * scheduled. The second time through, the driver is no longer
7237 * associated, and the newly selected network is sent an
7238 * association request.
7239 * 6. At this point, the roaming process is complete and the ROAM
7240 * status bit is cleared.
7243 /* If we are no longer associated, and the roaming bit is no longer
7244 * set, then we are not actively roaming, so just return */
7245 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7246 return;
7248 if (priv->status & STATUS_ASSOCIATED) {
7249 /* First pass through ROAM process -- look for a better
7250 * network */
7251 unsigned long flags;
7252 u8 rssi = priv->assoc_network->stats.rssi;
7253 priv->assoc_network->stats.rssi = -128;
7254 spin_lock_irqsave(&priv->ieee->lock, flags);
7255 list_for_each_entry(network, &priv->ieee->network_list, list) {
7256 if (network != priv->assoc_network)
7257 ipw_best_network(priv, &match, network, 1);
7259 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7260 priv->assoc_network->stats.rssi = rssi;
7262 if (match.network == priv->assoc_network) {
7263 IPW_DEBUG_ASSOC("No better APs in this network to "
7264 "roam to.\n");
7265 priv->status &= ~STATUS_ROAMING;
7266 ipw_debug_config(priv);
7267 return;
7270 ipw_send_disassociate(priv, 1);
7271 priv->assoc_network = match.network;
7273 return;
7276 /* Second pass through ROAM process -- request association */
7277 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7278 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7279 priv->status &= ~STATUS_ROAMING;
7282 static void ipw_bg_roam(void *data)
7284 struct ipw_priv *priv = data;
7285 mutex_lock(&priv->mutex);
7286 ipw_roam(data);
7287 mutex_unlock(&priv->mutex);
7290 static int ipw_associate(void *data)
7292 struct ipw_priv *priv = data;
7294 struct ieee80211_network *network = NULL;
7295 struct ipw_network_match match = {
7296 .network = NULL
7298 struct ipw_supported_rates *rates;
7299 struct list_head *element;
7300 unsigned long flags;
7302 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7303 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7304 return 0;
7307 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7308 IPW_DEBUG_ASSOC("Not attempting association (already in "
7309 "progress)\n");
7310 return 0;
7313 if (priv->status & STATUS_DISASSOCIATING) {
7314 IPW_DEBUG_ASSOC("Not attempting association (in "
7315 "disassociating)\n ");
7316 queue_work(priv->workqueue, &priv->associate);
7317 return 0;
7320 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7321 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7322 "initialized)\n");
7323 return 0;
7326 if (!(priv->config & CFG_ASSOCIATE) &&
7327 !(priv->config & (CFG_STATIC_ESSID |
7328 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7329 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7330 return 0;
7333 /* Protect our use of the network_list */
7334 spin_lock_irqsave(&priv->ieee->lock, flags);
7335 list_for_each_entry(network, &priv->ieee->network_list, list)
7336 ipw_best_network(priv, &match, network, 0);
7338 network = match.network;
7339 rates = &match.rates;
7341 if (network == NULL &&
7342 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7343 priv->config & CFG_ADHOC_CREATE &&
7344 priv->config & CFG_STATIC_ESSID &&
7345 priv->config & CFG_STATIC_CHANNEL &&
7346 !list_empty(&priv->ieee->network_free_list)) {
7347 element = priv->ieee->network_free_list.next;
7348 network = list_entry(element, struct ieee80211_network, list);
7349 ipw_adhoc_create(priv, network);
7350 rates = &priv->rates;
7351 list_del(element);
7352 list_add_tail(&network->list, &priv->ieee->network_list);
7354 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7356 /* If we reached the end of the list, then we don't have any valid
7357 * matching APs */
7358 if (!network) {
7359 ipw_debug_config(priv);
7361 if (!(priv->status & STATUS_SCANNING)) {
7362 if (!(priv->config & CFG_SPEED_SCAN))
7363 queue_delayed_work(priv->workqueue,
7364 &priv->request_scan,
7365 SCAN_INTERVAL);
7366 else
7367 queue_work(priv->workqueue,
7368 &priv->request_scan);
7371 return 0;
7374 ipw_associate_network(priv, network, rates, 0);
7376 return 1;
7379 static void ipw_bg_associate(void *data)
7381 struct ipw_priv *priv = data;
7382 mutex_lock(&priv->mutex);
7383 ipw_associate(data);
7384 mutex_unlock(&priv->mutex);
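/* Hardware decryption leaves the 802.11 PROTECTED bit and the security
 * header/trailer in place.  ipw_rebuild_decrypted_skb clears the bit and
 * strips the leftovers so the ieee80211 stack sees a plain frame:
 * an 8-byte CCMP header plus 8-byte MIC for SEC_LEVEL_3, and a 4-byte IV
 * plus 4-byte ICV for SEC_LEVEL_1 (WEP). */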
7387 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7388 struct sk_buff *skb)
7390 struct ieee80211_hdr *hdr;
7391 u16 fc;
7393 hdr = (struct ieee80211_hdr *)skb->data;
7394 fc = le16_to_cpu(hdr->frame_ctl);
7395 if (!(fc & IEEE80211_FCTL_PROTECTED))
7396 return;
7398 fc &= ~IEEE80211_FCTL_PROTECTED;
7399 hdr->frame_ctl = cpu_to_le16(fc);
7400 switch (priv->ieee->sec.level) {
7401 case SEC_LEVEL_3:
7402 /* Remove CCMP HDR */
7403 memmove(skb->data + IEEE80211_3ADDR_LEN,
7404 skb->data + IEEE80211_3ADDR_LEN + 8,
7405 skb->len - IEEE80211_3ADDR_LEN - 8);
7406 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7407 break;
7408 case SEC_LEVEL_2:
7409 break;
7410 case SEC_LEVEL_1:
7411 /* Remove IV */
7412 memmove(skb->data + IEEE80211_3ADDR_LEN,
7413 skb->data + IEEE80211_3ADDR_LEN + 4,
7414 skb->len - IEEE80211_3ADDR_LEN - 4);
7415 skb_trim(skb, skb->len - 8); /* IV + ICV */
7416 break;
7417 case SEC_LEVEL_0:
7418 break;
7419 default:
7420 printk(KERN_ERR "Unknown security level %d\n",
7421 priv->ieee->sec.level);
7422 break;
7426 static void ipw_handle_data_packet(struct ipw_priv *priv,
7427 struct ipw_rx_mem_buffer *rxb,
7428 struct ieee80211_rx_stats *stats)
7430 struct ieee80211_hdr_4addr *hdr;
7431 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7433 /* We received data from the HW, so stop the watchdog */
7434 priv->net_dev->trans_start = jiffies;
7436 /* We only process data packets if the
7437 * interface is open */
7438 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7439 skb_tailroom(rxb->skb))) {
7440 priv->ieee->stats.rx_errors++;
7441 priv->wstats.discard.misc++;
7442 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7443 return;
7444 } else if (unlikely(!netif_running(priv->net_dev))) {
7445 priv->ieee->stats.rx_dropped++;
7446 priv->wstats.discard.misc++;
7447 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7448 return;
7451 /* Advance skb->data to the start of the actual payload */
7452 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7454 /* Set the size of the skb to the size of the frame */
7455 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7457 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7459 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7460 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7461 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7462 (is_multicast_ether_addr(hdr->addr1) ?
7463 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7464 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7466 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7467 priv->ieee->stats.rx_errors++;
7468 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7469 rxb->skb = NULL;
7470 __ipw_led_activity_on(priv);
7474 #ifdef CONFIG_IEEE80211_RADIOTAP
7475 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7476 struct ipw_rx_mem_buffer *rxb,
7477 struct ieee80211_rx_stats *stats)
7479 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7480 struct ipw_rx_frame *frame = &pkt->u.frame;
7482 /* initial pull of some data */
7483 u16 received_channel = frame->received_channel;
7484 u8 antennaAndPhy = frame->antennaAndPhy;
7485 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7486 u16 pktrate = frame->rate;
7488 /* Magic struct that slots into the radiotap header -- no reason
7489 * to build this manually element by element, we can write it much
7490 * more efficiently than we can parse it. ORDER MATTERS HERE */
7491 struct ipw_rt_hdr {
7492 struct ieee80211_radiotap_header rt_hdr;
7493 u8 rt_flags; /* radiotap packet flags */
7494 u8 rt_rate; /* rate in 500kb/s */
7495 u16 rt_channel; /* channel in mhz */
7496 u16 rt_chbitmask; /* channel bitfield */
7497 s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
7498 u8 rt_antenna; /* antenna number */
7499 } *ipw_rt;
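/* radiotap expresses the rate in 500 kb/s units, so the switch further
 * down stores e.g. 2 for 1 Mb/s, 22 for 11 Mb/s and 108 for 54 Mb/s. */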
7501 short len = le16_to_cpu(pkt->u.frame.length);
7503 /* We received data from the HW, so stop the watchdog */
7504 priv->net_dev->trans_start = jiffies;
7506 /* We only process data packets if the
7507 * interface is open */
7508 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7509 skb_tailroom(rxb->skb))) {
7510 priv->ieee->stats.rx_errors++;
7511 priv->wstats.discard.misc++;
7512 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7513 return;
7514 } else if (unlikely(!netif_running(priv->net_dev))) {
7515 priv->ieee->stats.rx_dropped++;
7516 priv->wstats.discard.misc++;
7517 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7518 return;
7521 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7522 * that now */
7523 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7524 /* FIXME: Should alloc bigger skb instead */
7525 priv->ieee->stats.rx_dropped++;
7526 priv->wstats.discard.misc++;
7527 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7528 return;
7531 /* copy the frame itself */
7532 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7533 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7535 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7536 * part of our real header, saves a little time.
7538 * No longer necessary since we fill in all our data. Purge before merging
7539 * patch officially.
7540 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7541 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7544 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7546 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7547 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7548 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7550 /* Big bitfield of all the fields we provide in radiotap */
7551 ipw_rt->rt_hdr.it_present =
7552 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7553 (1 << IEEE80211_RADIOTAP_RATE) |
7554 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7555 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7556 (1 << IEEE80211_RADIOTAP_ANTENNA));
7558 /* Zero the flags, we'll add to them as we go */
7559 ipw_rt->rt_flags = 0;
7561 /* Convert signal to DBM */
7562 ipw_rt->rt_dbmsignal = antsignal;
7564 /* Convert the channel data and set the flags */
7565 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7566 if (received_channel > 14) { /* 802.11a */
7567 ipw_rt->rt_chbitmask =
7568 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7569 } else if (antennaAndPhy & 32) { /* 802.11b */
7570 ipw_rt->rt_chbitmask =
7571 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7572 } else { /* 802.11g */
7573 ipw_rt->rt_chbitmask =
7574 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7577 /* set the rate in multiples of 500k/s */
7578 switch (pktrate) {
7579 case IPW_TX_RATE_1MB:
7580 ipw_rt->rt_rate = 2;
7581 break;
7582 case IPW_TX_RATE_2MB:
7583 ipw_rt->rt_rate = 4;
7584 break;
7585 case IPW_TX_RATE_5MB:
7586 ipw_rt->rt_rate = 10;
7587 break;
7588 case IPW_TX_RATE_6MB:
7589 ipw_rt->rt_rate = 12;
7590 break;
7591 case IPW_TX_RATE_9MB:
7592 ipw_rt->rt_rate = 18;
7593 break;
7594 case IPW_TX_RATE_11MB:
7595 ipw_rt->rt_rate = 22;
7596 break;
7597 case IPW_TX_RATE_12MB:
7598 ipw_rt->rt_rate = 24;
7599 break;
7600 case IPW_TX_RATE_18MB:
7601 ipw_rt->rt_rate = 36;
7602 break;
7603 case IPW_TX_RATE_24MB:
7604 ipw_rt->rt_rate = 48;
7605 break;
7606 case IPW_TX_RATE_36MB:
7607 ipw_rt->rt_rate = 72;
7608 break;
7609 case IPW_TX_RATE_48MB:
7610 ipw_rt->rt_rate = 96;
7611 break;
7612 case IPW_TX_RATE_54MB:
7613 ipw_rt->rt_rate = 108;
7614 break;
7615 default:
7616 ipw_rt->rt_rate = 0;
7617 break;
7620 /* antenna number */
7621 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7623 /* set the preamble flag if we have it */
7624 if ((antennaAndPhy & 64))
7625 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7627 /* Set the size of the skb to the size of the frame */
7628 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7630 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7632 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7633 priv->ieee->stats.rx_errors++;
7634 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7635 rxb->skb = NULL;
7636 /* no LED during capture */
7639 #endif
7641 static int is_network_packet(struct ipw_priv *priv,
7642 struct ieee80211_hdr_4addr *header)
7644 /* Filter incoming packets to determine if they are targeted toward
7645 * this network, discarding packets coming from ourselves */
7646 switch (priv->ieee->iw_mode) {
7647 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7648 /* packets from our adapter are dropped (echo) */
7649 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7650 return 0;
7652 /* {broad,multi}cast packets to our BSSID go through */
7653 if (is_multicast_ether_addr(header->addr1))
7654 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7656 /* packets to our adapter go through */
7657 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7658 ETH_ALEN);
7660 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7661 /* packets from our adapter are dropped (echo) */
7662 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7663 return 0;
7665 /* {broad,multi}cast packets to our BSS go through */
7666 if (is_multicast_ether_addr(header->addr1))
7667 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7669 /* packets to our adapter go through */
7670 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7671 ETH_ALEN);
7674 return 1;
7677 #define IPW_PACKET_RETRY_TIME HZ
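/* A frame is treated as a duplicate when it carries the same sequence
 * number as the previous frame from that sender within
 * IPW_PACKET_RETRY_TIME (one second) and its fragment number is either
 * the same as, or out of order with respect to, the last one seen.  In
 * IBSS mode the per-sender state lives in a small hash table keyed on the
 * last byte of the source MAC; in infrastructure mode a single set of
 * counters is enough since all traffic arrives from the AP. */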
7679 static int is_duplicate_packet(struct ipw_priv *priv,
7680 struct ieee80211_hdr_4addr *header)
7682 u16 sc = le16_to_cpu(header->seq_ctl);
7683 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7684 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7685 u16 *last_seq, *last_frag;
7686 unsigned long *last_time;
7688 switch (priv->ieee->iw_mode) {
7689 case IW_MODE_ADHOC:
7691 struct list_head *p;
7692 struct ipw_ibss_seq *entry = NULL;
7693 u8 *mac = header->addr2;
7694 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7696 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7697 entry =
7698 list_entry(p, struct ipw_ibss_seq, list);
7699 if (!memcmp(entry->mac, mac, ETH_ALEN))
7700 break;
7702 if (p == &priv->ibss_mac_hash[index]) {
7703 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7704 if (!entry) {
7705 IPW_ERROR
7706 ("Cannot malloc new mac entry\n");
7707 return 0;
7709 memcpy(entry->mac, mac, ETH_ALEN);
7710 entry->seq_num = seq;
7711 entry->frag_num = frag;
7712 entry->packet_time = jiffies;
7713 list_add(&entry->list,
7714 &priv->ibss_mac_hash[index]);
7715 return 0;
7717 last_seq = &entry->seq_num;
7718 last_frag = &entry->frag_num;
7719 last_time = &entry->packet_time;
7720 break;
7722 case IW_MODE_INFRA:
7723 last_seq = &priv->last_seq_num;
7724 last_frag = &priv->last_frag_num;
7725 last_time = &priv->last_packet_time;
7726 break;
7727 default:
7728 return 0;
7730 if ((*last_seq == seq) &&
7731 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7732 if (*last_frag == frag)
7733 goto drop;
7734 if (*last_frag + 1 != frag)
7735 /* out-of-order fragment */
7736 goto drop;
7737 } else
7738 *last_seq = seq;
7740 *last_frag = frag;
7741 *last_time = jiffies;
7742 return 0;
7744 drop:
7745 /* This check is commented out because we have observed the card
7746 * receiving duplicate packets without the FCTL_RETRY bit set in
7747 * IBSS mode with fragmentation enabled.
7748 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7749 return 1;
7752 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7753 struct ipw_rx_mem_buffer *rxb,
7754 struct ieee80211_rx_stats *stats)
7756 struct sk_buff *skb = rxb->skb;
7757 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7758 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7759 (skb->data + IPW_RX_FRAME_SIZE);
7761 ieee80211_rx_mgt(priv->ieee, header, stats);
7763 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7764 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7765 IEEE80211_STYPE_PROBE_RESP) ||
7766 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7767 IEEE80211_STYPE_BEACON))) {
7768 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7769 ipw_add_station(priv, header->addr2);
7772 if (priv->config & CFG_NET_STATS) {
7773 IPW_DEBUG_HC("sending stat packet\n");
7775 /* Set the size of the skb to the size of the full
7776 * ipw header and 802.11 frame */
7777 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7778 IPW_RX_FRAME_SIZE);
7780 /* Advance past the ipw packet header to the 802.11 frame */
7781 skb_pull(skb, IPW_RX_FRAME_SIZE);
7783 /* Push the ieee80211_rx_stats before the 802.11 frame */
7784 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7786 skb->dev = priv->ieee->dev;
7788 /* Point raw at the ieee80211_stats */
7789 skb->mac.raw = skb->data;
7791 skb->pkt_type = PACKET_OTHERHOST;
7792 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7793 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7794 netif_rx(skb);
7795 rxb->skb = NULL;
7800 * Main entry function for receiving a packet with 802.11 headers. This
7801 * should be called whenever the FW has notified us that there is a new
7802 * skb in the receive queue.
7804 static void ipw_rx(struct ipw_priv *priv)
7806 struct ipw_rx_mem_buffer *rxb;
7807 struct ipw_rx_packet *pkt;
7808 struct ieee80211_hdr_4addr *header;
7809 u32 r, w, i;
7810 u8 network_packet;
7812 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7813 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7814 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
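/* The Rx queue is a circular buffer: the firmware advances the read index
 * as it fills buffers, and we walk from one past the last entry we
 * processed up to that read index, handing each frame to the right
 * handler and recycling its buffer, then restock the ring at the end. */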
7816 while (i != r) {
7817 rxb = priv->rxq->queue[i];
7818 #ifdef CONFIG_IPW2200_DEBUG
7819 if (unlikely(rxb == NULL)) {
7820 printk(KERN_CRIT "Queue not allocated!\n");
7821 break;
7823 #endif
7824 priv->rxq->queue[i] = NULL;
7826 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7827 IPW_RX_BUF_SIZE,
7828 PCI_DMA_FROMDEVICE);
7830 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7831 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7832 pkt->header.message_type,
7833 pkt->header.rx_seq_num, pkt->header.control_bits);
7835 switch (pkt->header.message_type) {
7836 case RX_FRAME_TYPE: /* 802.11 frame */ {
7837 struct ieee80211_rx_stats stats = {
7838 .rssi =
7839 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7840 IPW_RSSI_TO_DBM,
7841 .signal =
7842 le16_to_cpu(pkt->u.frame.signal),
7843 .noise =
7844 le16_to_cpu(pkt->u.frame.noise),
7845 .rate = pkt->u.frame.rate,
7846 .mac_time = jiffies,
7847 .received_channel =
7848 pkt->u.frame.received_channel,
7849 .freq =
7850 (pkt->u.frame.
7851 control & (1 << 0)) ?
7852 IEEE80211_24GHZ_BAND :
7853 IEEE80211_52GHZ_BAND,
7854 .len = le16_to_cpu(pkt->u.frame.length),
7857 if (stats.rssi != 0)
7858 stats.mask |= IEEE80211_STATMASK_RSSI;
7859 if (stats.signal != 0)
7860 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7861 if (stats.noise != 0)
7862 stats.mask |= IEEE80211_STATMASK_NOISE;
7863 if (stats.rate != 0)
7864 stats.mask |= IEEE80211_STATMASK_RATE;
7866 priv->rx_packets++;
7868 #ifdef CONFIG_IPW2200_MONITOR
7869 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7870 #ifdef CONFIG_IEEE80211_RADIOTAP
7871 ipw_handle_data_packet_monitor(priv,
7872 rxb,
7873 &stats);
7874 #else
7875 ipw_handle_data_packet(priv, rxb,
7876 &stats);
7877 #endif
7878 break;
7880 #endif
7882 header =
7883 (struct ieee80211_hdr_4addr *)(rxb->skb->
7884 data +
7885 IPW_RX_FRAME_SIZE);
7886 /* TODO: Check Ad-Hoc dest/source and make sure
7887 * that we are actually parsing these packets
7888 * correctly -- we should probably use the
7889 * frame control of the packet and disregard
7890 * the current iw_mode */
7892 network_packet =
7893 is_network_packet(priv, header);
7894 if (network_packet && priv->assoc_network) {
7895 priv->assoc_network->stats.rssi =
7896 stats.rssi;
7897 average_add(&priv->average_rssi,
7898 stats.rssi);
7899 priv->last_rx_rssi = stats.rssi;
7902 IPW_DEBUG_RX("Frame: len=%u\n",
7903 le16_to_cpu(pkt->u.frame.length));
7905 if (le16_to_cpu(pkt->u.frame.length) <
7906 frame_hdr_len(header)) {
7907 IPW_DEBUG_DROP
7908 ("Received packet is too small. "
7909 "Dropping.\n");
7910 priv->ieee->stats.rx_errors++;
7911 priv->wstats.discard.misc++;
7912 break;
7915 switch (WLAN_FC_GET_TYPE
7916 (le16_to_cpu(header->frame_ctl))) {
7918 case IEEE80211_FTYPE_MGMT:
7919 ipw_handle_mgmt_packet(priv, rxb,
7920 &stats);
7921 break;
7923 case IEEE80211_FTYPE_CTL:
7924 break;
7926 case IEEE80211_FTYPE_DATA:
7927 if (unlikely(!network_packet ||
7928 is_duplicate_packet(priv,
7929 header)))
7931 IPW_DEBUG_DROP("Dropping: "
7932 MAC_FMT ", "
7933 MAC_FMT ", "
7934 MAC_FMT "\n",
7935 MAC_ARG(header->
7936 addr1),
7937 MAC_ARG(header->
7938 addr2),
7939 MAC_ARG(header->
7940 addr3));
7941 break;
7944 ipw_handle_data_packet(priv, rxb,
7945 &stats);
7947 break;
7949 break;
7952 case RX_HOST_NOTIFICATION_TYPE:{
7953 IPW_DEBUG_RX
7954 ("Notification: subtype=%02X flags=%02X size=%d\n",
7955 pkt->u.notification.subtype,
7956 pkt->u.notification.flags,
7957 pkt->u.notification.size);
7958 ipw_rx_notification(priv, &pkt->u.notification);
7959 break;
7962 default:
7963 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
7964 pkt->header.message_type);
7965 break;
7968 /* For now we just don't re-use anything. We can tweak this
7969 * later to try and re-use notification packets and SKBs that
7970 * fail to Rx correctly */
7971 if (rxb->skb != NULL) {
7972 dev_kfree_skb_any(rxb->skb);
7973 rxb->skb = NULL;
7976 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
7977 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
7978 list_add_tail(&rxb->list, &priv->rxq->rx_used);
7980 i = (i + 1) % RX_QUEUE_SIZE;
7983 /* Backtrack one entry */
7984 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
7986 ipw_rx_queue_restock(priv);
7989 #define DEFAULT_RTS_THRESHOLD 2304U
7990 #define MIN_RTS_THRESHOLD 1U
7991 #define MAX_RTS_THRESHOLD 2304U
7992 #define DEFAULT_BEACON_INTERVAL 100U
7993 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7994 #define DEFAULT_LONG_RETRY_LIMIT 4U
7996 static int ipw_sw_reset(struct ipw_priv *priv, int init)
7998 int band, modulation;
7999 int old_mode = priv->ieee->iw_mode;
8001 /* Initialize module parameter values here */
8002 priv->config = 0;
8004 /* We default to disabling the LED code as right now it causes
8005 * too many systems to lock up... */
8006 if (!led)
8007 priv->config |= CFG_NO_LED;
8009 if (associate)
8010 priv->config |= CFG_ASSOCIATE;
8011 else
8012 IPW_DEBUG_INFO("Auto associate disabled.\n");
8014 if (auto_create)
8015 priv->config |= CFG_ADHOC_CREATE;
8016 else
8017 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8019 priv->config &= ~CFG_STATIC_ESSID;
8020 priv->essid_len = 0;
8021 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8023 if (disable) {
8024 priv->status |= STATUS_RF_KILL_SW;
8025 IPW_DEBUG_INFO("Radio disabled.\n");
8028 if (channel != 0) {
8029 priv->config |= CFG_STATIC_CHANNEL;
8030 priv->channel = channel;
8031 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8032 /* TODO: Validate that provided channel is in range */
8034 #ifdef CONFIG_IPW_QOS
8035 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8036 burst_duration_CCK, burst_duration_OFDM);
8037 #endif /* CONFIG_IPW_QOS */
8039 switch (mode) {
8040 case 1:
8041 priv->ieee->iw_mode = IW_MODE_ADHOC;
8042 priv->net_dev->type = ARPHRD_ETHER;
8044 break;
8045 #ifdef CONFIG_IPW2200_MONITOR
8046 case 2:
8047 priv->ieee->iw_mode = IW_MODE_MONITOR;
8048 #ifdef CONFIG_IEEE80211_RADIOTAP
8049 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8050 #else
8051 priv->net_dev->type = ARPHRD_IEEE80211;
8052 #endif
8053 break;
8054 #endif
8055 default:
8056 case 0:
8057 priv->net_dev->type = ARPHRD_ETHER;
8058 priv->ieee->iw_mode = IW_MODE_INFRA;
8059 break;
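/* With hardware crypto enabled, all encryption and decryption is
 * offloaded to the firmware rather than done by the host ieee80211
 * layer. */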
8062 if (hwcrypto) {
8063 priv->ieee->host_encrypt = 0;
8064 priv->ieee->host_encrypt_msdu = 0;
8065 priv->ieee->host_decrypt = 0;
8066 priv->ieee->host_mc_decrypt = 0;
8068 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8070 /* IPW2200/2915 is able to do hardware fragmentation. */
8071 priv->ieee->host_open_frag = 0;
8073 if ((priv->pci_dev->device == 0x4223) ||
8074 (priv->pci_dev->device == 0x4224)) {
8075 if (init)
8076 printk(KERN_INFO DRV_NAME
8077 ": Detected Intel PRO/Wireless 2915ABG Network "
8078 "Connection\n");
8079 priv->ieee->abg_true = 1;
8080 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8081 modulation = IEEE80211_OFDM_MODULATION |
8082 IEEE80211_CCK_MODULATION;
8083 priv->adapter = IPW_2915ABG;
8084 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8085 } else {
8086 if (init)
8087 printk(KERN_INFO DRV_NAME
8088 ": Detected Intel PRO/Wireless 2200BG Network "
8089 "Connection\n");
8091 priv->ieee->abg_true = 0;
8092 band = IEEE80211_24GHZ_BAND;
8093 modulation = IEEE80211_OFDM_MODULATION |
8094 IEEE80211_CCK_MODULATION;
8095 priv->adapter = IPW_2200BG;
8096 priv->ieee->mode = IEEE_G | IEEE_B;
8099 priv->ieee->freq_band = band;
8100 priv->ieee->modulation = modulation;
8102 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8104 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8105 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8107 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8108 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8109 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8111 /* If power management is turned on, default to AC mode */
8112 priv->power_mode = IPW_POWER_AC;
8113 priv->tx_power = IPW_TX_POWER_DEFAULT;
8115 return old_mode == priv->ieee->iw_mode;
8119 * This file defines the Wireless Extension handlers. It does not
8120 * define any methods of hardware manipulation and relies on the
8121 * functions defined in ipw_main to provide the HW interaction.
8123 * The exception to this is the use of the ipw_get_ordinal()
8124 * function used to poll the hardware vs. making unnecessary calls.
8128 static int ipw_wx_get_name(struct net_device *dev,
8129 struct iw_request_info *info,
8130 union iwreq_data *wrqu, char *extra)
8132 struct ipw_priv *priv = ieee80211_priv(dev);
8133 mutex_lock(&priv->mutex);
8134 if (priv->status & STATUS_RF_KILL_MASK)
8135 strcpy(wrqu->name, "radio off");
8136 else if (!(priv->status & STATUS_ASSOCIATED))
8137 strcpy(wrqu->name, "unassociated");
8138 else
8139 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8140 ipw_modes[priv->assoc_request.ieee_mode]);
8141 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8142 mutex_unlock(&priv->mutex);
8143 return 0;
8146 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8148 if (channel == 0) {
8149 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8150 priv->config &= ~CFG_STATIC_CHANNEL;
8151 IPW_DEBUG_ASSOC("Attempting to associate with new "
8152 "parameters.\n");
8153 ipw_associate(priv);
8154 return 0;
8157 priv->config |= CFG_STATIC_CHANNEL;
8159 if (priv->channel == channel) {
8160 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8161 channel);
8162 return 0;
8165 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8166 priv->channel = channel;
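/* In monitor mode a channel change only needs any in-progress scan
 * aborted (waiting up to ~10ms for it to stop) before retuning; in
 * other modes a full [re]association is forced below. */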
8168 #ifdef CONFIG_IPW2200_MONITOR
8169 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8170 int i;
8171 if (priv->status & STATUS_SCANNING) {
8172 IPW_DEBUG_SCAN("Scan abort triggered due to "
8173 "channel change.\n");
8174 ipw_abort_scan(priv);
8177 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8178 udelay(10);
8180 if (priv->status & STATUS_SCANNING)
8181 IPW_DEBUG_SCAN("Still scanning...\n");
8182 else
8183 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8184 1000 - i);
8186 return 0;
8188 #endif /* CONFIG_IPW2200_MONITOR */
8190 /* Network configuration changed -- force [re]association */
8191 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8192 if (!ipw_disassociate(priv))
8193 ipw_associate(priv);
8195 return 0;
8198 static int ipw_wx_set_freq(struct net_device *dev,
8199 struct iw_request_info *info,
8200 union iwreq_data *wrqu, char *extra)
8202 struct ipw_priv *priv = ieee80211_priv(dev);
8203 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8204 struct iw_freq *fwrq = &wrqu->freq;
8205 int ret = 0, i;
8206 u8 channel, flags;
8207 int band;
8209 if (fwrq->m == 0) {
8210 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8211 mutex_lock(&priv->mutex);
8212 ret = ipw_set_channel(priv, 0);
8213 mutex_unlock(&priv->mutex);
8214 return ret;
8216 /* if setting by freq convert to channel */
8217 if (fwrq->e == 1) {
8218 channel = ipw_freq_to_channel(priv->ieee, fwrq->m);
8219 if (channel == 0)
8220 return -EINVAL;
8221 } else
8222 channel = fwrq->m;
8224 if (!(band = ipw_is_valid_channel(priv->ieee, channel)))
8225 return -EINVAL;
8227 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8228 i = ipw_channel_to_index(priv->ieee, channel);
8229 if (i == -1)
8230 return -EINVAL;
8232 flags = (band == IEEE80211_24GHZ_BAND) ?
8233 geo->bg[i].flags : geo->a[i].flags;
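/* Ad-Hoc mode requires a channel we may transmit on, so reject
 * passive-scan-only channels. */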
8234 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8235 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8236 return -EINVAL;
8240 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8241 mutex_lock(&priv->mutex);
8242 ret = ipw_set_channel(priv, channel);
8243 mutex_unlock(&priv->mutex);
8244 return ret;
8247 static int ipw_wx_get_freq(struct net_device *dev,
8248 struct iw_request_info *info,
8249 union iwreq_data *wrqu, char *extra)
8251 struct ipw_priv *priv = ieee80211_priv(dev);
8253 wrqu->freq.e = 0;
8255 /* If we are associated, trying to associate, or have a statically
8256 * configured CHANNEL then return that; otherwise return ANY */
8257 mutex_lock(&priv->mutex);
8258 if (priv->config & CFG_STATIC_CHANNEL ||
8259 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8260 wrqu->freq.m = priv->channel;
8261 else
8262 wrqu->freq.m = 0;
8264 mutex_unlock(&priv->mutex);
8265 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8266 return 0;
8269 static int ipw_wx_set_mode(struct net_device *dev,
8270 struct iw_request_info *info,
8271 union iwreq_data *wrqu, char *extra)
8273 struct ipw_priv *priv = ieee80211_priv(dev);
8274 int err = 0;
8276 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8278 switch (wrqu->mode) {
8279 #ifdef CONFIG_IPW2200_MONITOR
8280 case IW_MODE_MONITOR:
8281 #endif
8282 case IW_MODE_ADHOC:
8283 case IW_MODE_INFRA:
8284 break;
8285 case IW_MODE_AUTO:
8286 wrqu->mode = IW_MODE_INFRA;
8287 break;
8288 default:
8289 return -EINVAL;
8291 if (wrqu->mode == priv->ieee->iw_mode)
8292 return 0;
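/* Switching modes requires a soft reset, an ARP hardware type that
 * matches the new mode, and an adapter restart with freshly loaded
 * firmware -- all handled below under the mutex. */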
8294 mutex_lock(&priv->mutex);
8296 ipw_sw_reset(priv, 0);
8298 #ifdef CONFIG_IPW2200_MONITOR
8299 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8300 priv->net_dev->type = ARPHRD_ETHER;
8302 if (wrqu->mode == IW_MODE_MONITOR)
8303 #ifdef CONFIG_IEEE80211_RADIOTAP
8304 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8305 #else
8306 priv->net_dev->type = ARPHRD_IEEE80211;
8307 #endif
8308 #endif /* CONFIG_IPW2200_MONITOR */
8310 /* Free the existing firmware and reset the fw_loaded
8311 * flag so ipw_load() will bring in the new firmware */
8312 free_firmware();
8314 priv->ieee->iw_mode = wrqu->mode;
8316 queue_work(priv->workqueue, &priv->adapter_restart);
8317 mutex_unlock(&priv->mutex);
8318 return err;
8321 static int ipw_wx_get_mode(struct net_device *dev,
8322 struct iw_request_info *info,
8323 union iwreq_data *wrqu, char *extra)
8325 struct ipw_priv *priv = ieee80211_priv(dev);
8326 mutex_lock(&priv->mutex);
8327 wrqu->mode = priv->ieee->iw_mode;
8328 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8329 mutex_unlock(&priv->mutex);
8330 return 0;
8333 /* Values are in microseconds */
8334 static const s32 timeout_duration[] = {
8335 350000,
8336 250000,
8337 75000,
8338 37000,
8339 25000,
8342 static const s32 period_duration[] = {
8343 400000,
8344 700000,
8345 1000000,
8346 1000000,
8347 1000000
8350 static int ipw_wx_get_range(struct net_device *dev,
8351 struct iw_request_info *info,
8352 union iwreq_data *wrqu, char *extra)
8354 struct ipw_priv *priv = ieee80211_priv(dev);
8355 struct iw_range *range = (struct iw_range *)extra;
8356 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8357 int i = 0, j;
8359 wrqu->data.length = sizeof(*range);
8360 memset(range, 0, sizeof(*range));
8362 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8363 range->throughput = 27 * 1000 * 1000;
8365 range->max_qual.qual = 100;
8366 /* TODO: Find real max RSSI and stick here */
8367 range->max_qual.level = 0;
8368 range->max_qual.noise = priv->ieee->worst_rssi + 0x100;
8369 range->max_qual.updated = 7; /* Updated all three */
8371 range->avg_qual.qual = 70;
8372 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8373 range->avg_qual.level = 0; /* FIXME to real average level */
8374 range->avg_qual.noise = 0;
8375 range->avg_qual.updated = 7; /* Updated all three */
8376 mutex_lock(&priv->mutex);
8377 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8379 for (i = 0; i < range->num_bitrates; i++)
8380 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8381 500000;
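/* supported_rates entries are in 500 kb/s units; bit 0x80 marks a
 * basic rate and is masked off before converting to bits per second. */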
8383 range->max_rts = DEFAULT_RTS_THRESHOLD;
8384 range->min_frag = MIN_FRAG_THRESHOLD;
8385 range->max_frag = MAX_FRAG_THRESHOLD;
8387 range->encoding_size[0] = 5;
8388 range->encoding_size[1] = 13;
8389 range->num_encoding_sizes = 2;
8390 range->max_encoding_tokens = WEP_KEYS;
8392 /* Set the Wireless Extension versions */
8393 range->we_version_compiled = WIRELESS_EXT;
8394 range->we_version_source = 18;
8396 i = 0;
8397 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8398 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES;
8399 i++, j++) {
8400 range->freq[i].i = geo->bg[j].channel;
8401 range->freq[i].m = geo->bg[j].freq * 100000;
8402 range->freq[i].e = 1;
8406 if (priv->ieee->mode & IEEE_A) {
8407 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES;
8408 i++, j++) {
8409 range->freq[i].i = geo->a[j].channel;
8410 range->freq[i].m = geo->a[j].freq * 100000;
8411 range->freq[i].e = 1;
8415 range->num_channels = i;
8416 range->num_frequency = i;
8418 mutex_unlock(&priv->mutex);
8420 /* Event capability (kernel + driver) */
8421 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8422 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8423 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8424 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8426 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8427 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8429 IPW_DEBUG_WX("GET Range\n");
8430 return 0;
8433 static int ipw_wx_set_wap(struct net_device *dev,
8434 struct iw_request_info *info,
8435 union iwreq_data *wrqu, char *extra)
8437 struct ipw_priv *priv = ieee80211_priv(dev);
8439 static const unsigned char any[] = {
8440 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8442 static const unsigned char off[] = {
8443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8446 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8447 return -EINVAL;
8448 mutex_lock(&priv->mutex);
8449 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8450 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8451 /* we disable mandatory BSSID association */
8452 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8453 priv->config &= ~CFG_STATIC_BSSID;
8454 IPW_DEBUG_ASSOC("Attempting to associate with new "
8455 "parameters.\n");
8456 ipw_associate(priv);
8457 mutex_unlock(&priv->mutex);
8458 return 0;
8461 priv->config |= CFG_STATIC_BSSID;
8462 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8463 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8464 mutex_unlock(&priv->mutex);
8465 return 0;
8468 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8469 MAC_ARG(wrqu->ap_addr.sa_data));
8471 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8473 /* Network configuration changed -- force [re]association */
8474 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8475 if (!ipw_disassociate(priv))
8476 ipw_associate(priv);
8478 mutex_unlock(&priv->mutex);
8479 return 0;
8482 static int ipw_wx_get_wap(struct net_device *dev,
8483 struct iw_request_info *info,
8484 union iwreq_data *wrqu, char *extra)
8486 struct ipw_priv *priv = ieee80211_priv(dev);
8487 /* If we are associated, trying to associate, or have a statically
8488 * configured BSSID then return that; otherwise return ANY */
8489 mutex_lock(&priv->mutex);
8490 if (priv->config & CFG_STATIC_BSSID ||
8491 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8492 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8493 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8494 } else
8495 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8497 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8498 MAC_ARG(wrqu->ap_addr.sa_data));
8499 mutex_unlock(&priv->mutex);
8500 return 0;
8503 static int ipw_wx_set_essid(struct net_device *dev,
8504 struct iw_request_info *info,
8505 union iwreq_data *wrqu, char *extra)
8507 struct ipw_priv *priv = ieee80211_priv(dev);
8508 char *essid = ""; /* ANY */
8509 int length = 0;
8510 mutex_lock(&priv->mutex);
8511 if (wrqu->essid.flags && wrqu->essid.length) {
8512 length = wrqu->essid.length - 1;
8513 essid = extra;
8515 if (length == 0) {
8516 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8517 if ((priv->config & CFG_STATIC_ESSID) &&
8518 !(priv->status & (STATUS_ASSOCIATED |
8519 STATUS_ASSOCIATING))) {
8520 IPW_DEBUG_ASSOC("Attempting to associate with new "
8521 "parameters.\n");
8522 priv->config &= ~CFG_STATIC_ESSID;
8523 ipw_associate(priv);
8525 mutex_unlock(&priv->mutex);
8526 return 0;
8529 length = min(length, IW_ESSID_MAX_SIZE);
8531 priv->config |= CFG_STATIC_ESSID;
8533 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8534 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8535 mutex_unlock(&priv->mutex);
8536 return 0;
8539 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8540 length);
8542 priv->essid_len = length;
8543 memcpy(priv->essid, essid, priv->essid_len);
8545 /* Network configuration changed -- force [re]association */
8546 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8547 if (!ipw_disassociate(priv))
8548 ipw_associate(priv);
8550 mutex_unlock(&priv->mutex);
8551 return 0;
8554 static int ipw_wx_get_essid(struct net_device *dev,
8555 struct iw_request_info *info,
8556 union iwreq_data *wrqu, char *extra)
8558 struct ipw_priv *priv = ieee80211_priv(dev);
8560 /* If we are associated, trying to associate, or have a statically
8561 * configured ESSID then return that; otherwise return ANY */
8562 mutex_lock(&priv->mutex);
8563 if (priv->config & CFG_STATIC_ESSID ||
8564 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8565 IPW_DEBUG_WX("Getting essid: '%s'\n",
8566 escape_essid(priv->essid, priv->essid_len));
8567 memcpy(extra, priv->essid, priv->essid_len);
8568 wrqu->essid.length = priv->essid_len;
8569 wrqu->essid.flags = 1; /* active */
8570 } else {
8571 IPW_DEBUG_WX("Getting essid: ANY\n");
8572 wrqu->essid.length = 0;
8573 wrqu->essid.flags = 0; /* active */
8575 mutex_unlock(&priv->mutex);
8576 return 0;
8579 static int ipw_wx_set_nick(struct net_device *dev,
8580 struct iw_request_info *info,
8581 union iwreq_data *wrqu, char *extra)
8583 struct ipw_priv *priv = ieee80211_priv(dev);
8585 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8586 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8587 return -E2BIG;
8588 mutex_lock(&priv->mutex);
8589 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8590 memset(priv->nick, 0, sizeof(priv->nick));
8591 memcpy(priv->nick, extra, wrqu->data.length);
8592 IPW_DEBUG_TRACE("<<\n");
8593 mutex_unlock(&priv->mutex);
8594 return 0;
8598 static int ipw_wx_get_nick(struct net_device *dev,
8599 struct iw_request_info *info,
8600 union iwreq_data *wrqu, char *extra)
8602 struct ipw_priv *priv = ieee80211_priv(dev);
8603 IPW_DEBUG_WX("Getting nick\n");
8604 mutex_lock(&priv->mutex);
8605 wrqu->data.length = strlen(priv->nick) + 1;
8606 memcpy(extra, priv->nick, wrqu->data.length);
8607 wrqu->data.flags = 1; /* active */
8608 mutex_unlock(&priv->mutex);
8609 return 0;
8612 static int ipw_wx_set_rate(struct net_device *dev,
8613 struct iw_request_info *info,
8614 union iwreq_data *wrqu, char *extra)
8616 /* TODO: We should use semaphores or locks for access to priv */
8617 struct ipw_priv *priv = ieee80211_priv(dev);
8618 u32 target_rate = wrqu->bitrate.value;
8619 u32 fixed, mask;
8621 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8622 /* value = X, fixed = 1 means only rate X */
8623 /* value = X, fixed = 0 means all rates lower than or equal to X */
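/* Build the mask by walking rates from lowest to highest: with 'fixed'
 * clear, every rate up to and including the target is enabled; with
 * 'fixed' set, only the exact matching rate is. */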
8625 if (target_rate == -1) {
8626 fixed = 0;
8627 mask = IEEE80211_DEFAULT_RATES_MASK;
8628 /* Now we should reassociate */
8629 goto apply;
8632 mask = 0;
8633 fixed = wrqu->bitrate.fixed;
8635 if (target_rate == 1000000 || !fixed)
8636 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8637 if (target_rate == 1000000)
8638 goto apply;
8640 if (target_rate == 2000000 || !fixed)
8641 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8642 if (target_rate == 2000000)
8643 goto apply;
8645 if (target_rate == 5500000 || !fixed)
8646 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8647 if (target_rate == 5500000)
8648 goto apply;
8650 if (target_rate == 6000000 || !fixed)
8651 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8652 if (target_rate == 6000000)
8653 goto apply;
8655 if (target_rate == 9000000 || !fixed)
8656 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8657 if (target_rate == 9000000)
8658 goto apply;
8660 if (target_rate == 11000000 || !fixed)
8661 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8662 if (target_rate == 11000000)
8663 goto apply;
8665 if (target_rate == 12000000 || !fixed)
8666 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8667 if (target_rate == 12000000)
8668 goto apply;
8670 if (target_rate == 18000000 || !fixed)
8671 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8672 if (target_rate == 18000000)
8673 goto apply;
8675 if (target_rate == 24000000 || !fixed)
8676 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8677 if (target_rate == 24000000)
8678 goto apply;
8680 if (target_rate == 36000000 || !fixed)
8681 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8682 if (target_rate == 36000000)
8683 goto apply;
8685 if (target_rate == 48000000 || !fixed)
8686 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8687 if (target_rate == 48000000)
8688 goto apply;
8690 if (target_rate == 54000000 || !fixed)
8691 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8692 if (target_rate == 54000000)
8693 goto apply;
8695 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8696 return -EINVAL;
8698 apply:
8699 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8700 mask, fixed ? "fixed" : "sub-rates");
8701 mutex_lock(&priv->mutex);
8702 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8703 priv->config &= ~CFG_FIXED_RATE;
8704 ipw_set_fixed_rate(priv, priv->ieee->mode);
8705 } else
8706 priv->config |= CFG_FIXED_RATE;
8708 if (priv->rates_mask == mask) {
8709 IPW_DEBUG_WX("Mask set to current mask.\n");
8710 mutex_unlock(&priv->mutex);
8711 return 0;
8714 priv->rates_mask = mask;
8716 /* Network configuration changed -- force [re]association */
8717 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8718 if (!ipw_disassociate(priv))
8719 ipw_associate(priv);
8721 mutex_unlock(&priv->mutex);
8722 return 0;
8725 static int ipw_wx_get_rate(struct net_device *dev,
8726 struct iw_request_info *info,
8727 union iwreq_data *wrqu, char *extra)
8729 struct ipw_priv *priv = ieee80211_priv(dev);
8730 mutex_lock(&priv->mutex);
8731 wrqu->bitrate.value = priv->last_rate;
8732 mutex_unlock(&priv->mutex);
8733 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8734 return 0;
8737 static int ipw_wx_set_rts(struct net_device *dev,
8738 struct iw_request_info *info,
8739 union iwreq_data *wrqu, char *extra)
8741 struct ipw_priv *priv = ieee80211_priv(dev);
8742 mutex_lock(&priv->mutex);
8743 if (wrqu->rts.disabled)
8744 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8745 else {
8746 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8747 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8748 mutex_unlock(&priv->mutex);
8749 return -EINVAL;
8751 priv->rts_threshold = wrqu->rts.value;
8754 ipw_send_rts_threshold(priv, priv->rts_threshold);
8755 mutex_unlock(&priv->mutex);
8756 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8757 return 0;
8760 static int ipw_wx_get_rts(struct net_device *dev,
8761 struct iw_request_info *info,
8762 union iwreq_data *wrqu, char *extra)
8764 struct ipw_priv *priv = ieee80211_priv(dev);
8765 mutex_lock(&priv->mutex);
8766 wrqu->rts.value = priv->rts_threshold;
8767 wrqu->rts.fixed = 0; /* no auto select */
8768 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8769 mutex_unlock(&priv->mutex);
8770 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8771 return 0;
8774 static int ipw_wx_set_txpow(struct net_device *dev,
8775 struct iw_request_info *info,
8776 union iwreq_data *wrqu, char *extra)
8778 struct ipw_priv *priv = ieee80211_priv(dev);
8779 int err = 0;
8781 mutex_lock(&priv->mutex);
8782 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8783 err = -EINPROGRESS;
8784 goto out;
8787 if (!wrqu->power.fixed)
8788 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8790 if (wrqu->power.flags != IW_TXPOW_DBM) {
8791 err = -EINVAL;
8792 goto out;
8795 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8796 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8797 err = -EINVAL;
8798 goto out;
8801 priv->tx_power = wrqu->power.value;
8802 err = ipw_set_tx_power(priv);
8803 out:
8804 mutex_unlock(&priv->mutex);
8805 return err;
8808 static int ipw_wx_get_txpow(struct net_device *dev,
8809 struct iw_request_info *info,
8810 union iwreq_data *wrqu, char *extra)
8812 struct ipw_priv *priv = ieee80211_priv(dev);
8813 mutex_lock(&priv->mutex);
8814 wrqu->power.value = priv->tx_power;
8815 wrqu->power.fixed = 1;
8816 wrqu->power.flags = IW_TXPOW_DBM;
8817 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8818 mutex_unlock(&priv->mutex);
8820 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8821 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8823 return 0;
8826 static int ipw_wx_set_frag(struct net_device *dev,
8827 struct iw_request_info *info,
8828 union iwreq_data *wrqu, char *extra)
8830 struct ipw_priv *priv = ieee80211_priv(dev);
8831 mutex_lock(&priv->mutex);
8832 if (wrqu->frag.disabled)
8833 priv->ieee->fts = DEFAULT_FTS;
8834 else {
8835 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8836 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8837 mutex_unlock(&priv->mutex);
8838 return -EINVAL;
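/* The fragmentation threshold must be an even value, so the low bit
 * is cleared. */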
8841 priv->ieee->fts = wrqu->frag.value & ~0x1;
8844 ipw_send_frag_threshold(priv, wrqu->frag.value);
8845 mutex_unlock(&priv->mutex);
8846 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8847 return 0;
8850 static int ipw_wx_get_frag(struct net_device *dev,
8851 struct iw_request_info *info,
8852 union iwreq_data *wrqu, char *extra)
8854 struct ipw_priv *priv = ieee80211_priv(dev);
8855 mutex_lock(&priv->mutex);
8856 wrqu->frag.value = priv->ieee->fts;
8857 wrqu->frag.fixed = 0; /* no auto select */
8858 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8859 mutex_unlock(&priv->mutex);
8860 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8862 return 0;
8865 static int ipw_wx_set_retry(struct net_device *dev,
8866 struct iw_request_info *info,
8867 union iwreq_data *wrqu, char *extra)
8869 struct ipw_priv *priv = ieee80211_priv(dev);
8871 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8872 return -EINVAL;
8874 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8875 return 0;
8877 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8878 return -EINVAL;
8880 mutex_lock(&priv->mutex);
8881 if (wrqu->retry.flags & IW_RETRY_MIN)
8882 priv->short_retry_limit = (u8) wrqu->retry.value;
8883 else if (wrqu->retry.flags & IW_RETRY_MAX)
8884 priv->long_retry_limit = (u8) wrqu->retry.value;
8885 else {
8886 priv->short_retry_limit = (u8) wrqu->retry.value;
8887 priv->long_retry_limit = (u8) wrqu->retry.value;
8890 ipw_send_retry_limit(priv, priv->short_retry_limit,
8891 priv->long_retry_limit);
8892 mutex_unlock(&priv->mutex);
8893 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8894 priv->short_retry_limit, priv->long_retry_limit);
8895 return 0;
8898 static int ipw_wx_get_retry(struct net_device *dev,
8899 struct iw_request_info *info,
8900 union iwreq_data *wrqu, char *extra)
8902 struct ipw_priv *priv = ieee80211_priv(dev);
8904 mutex_lock(&priv->mutex);
8905 wrqu->retry.disabled = 0;
8907 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8908 mutex_unlock(&priv->mutex);
8909 return -EINVAL;
8912 if (wrqu->retry.flags & IW_RETRY_MAX) {
8913 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8914 wrqu->retry.value = priv->long_retry_limit;
8915 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8916 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8917 wrqu->retry.value = priv->short_retry_limit;
8918 } else {
8919 wrqu->retry.flags = IW_RETRY_LIMIT;
8920 wrqu->retry.value = priv->short_retry_limit;
8922 mutex_unlock(&priv->mutex);
8924 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8926 return 0;
8929 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8930 int essid_len)
8932 struct ipw_scan_request_ext scan;
8933 int err = 0, scan_type;
8935 if (!(priv->status & STATUS_INIT) ||
8936 (priv->status & STATUS_EXIT_PENDING))
8937 return 0;
8939 mutex_lock(&priv->mutex);
8941 if (priv->status & STATUS_RF_KILL_MASK) {
8942 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
8943 priv->status |= STATUS_SCAN_PENDING;
8944 goto done;
8947 IPW_DEBUG_HC("starting request direct scan!\n");
8949 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8950 /* We should not sleep here; otherwise we will block most
8951 * of the system (for instance, we hold rtnl_lock when we
8952 * get here).
8954 err = -EAGAIN;
8955 goto done;
8957 memset(&scan, 0, sizeof(scan));
8959 if (priv->config & CFG_SPEED_SCAN)
8960 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8961 cpu_to_le16(30);
8962 else
8963 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8964 cpu_to_le16(20);
8966 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
8967 cpu_to_le16(20);
8968 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
8969 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
8971 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
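/* Send the SSID to the firmware first so that the directed scan
 * requested below probes specifically for it. */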
8973 err = ipw_send_ssid(priv, essid, essid_len);
8974 if (err) {
8975 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
8976 goto done;
8978 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
8980 ipw_add_scan_channels(priv, &scan, scan_type);
8982 err = ipw_send_scan_request_ext(priv, &scan);
8983 if (err) {
8984 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
8985 goto done;
8988 priv->status |= STATUS_SCANNING;
8990 done:
8991 mutex_unlock(&priv->mutex);
8992 return err;
8995 static int ipw_wx_set_scan(struct net_device *dev,
8996 struct iw_request_info *info,
8997 union iwreq_data *wrqu, char *extra)
8999 struct ipw_priv *priv = ieee80211_priv(dev);
9000 struct iw_scan_req *req = NULL;
9001 if (wrqu->data.length
9002 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9003 req = (struct iw_scan_req *)extra;
9004 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9005 ipw_request_direct_scan(priv, req->essid,
9006 req->essid_len);
9007 return 0;
9011 IPW_DEBUG_WX("Start scan\n");
9013 queue_work(priv->workqueue, &priv->request_scan);
9015 return 0;
9018 static int ipw_wx_get_scan(struct net_device *dev,
9019 struct iw_request_info *info,
9020 union iwreq_data *wrqu, char *extra)
9022 struct ipw_priv *priv = ieee80211_priv(dev);
9023 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9026 static int ipw_wx_set_encode(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *key)
9030 struct ipw_priv *priv = ieee80211_priv(dev);
9031 int ret;
9032 u32 cap = priv->capability;
9034 mutex_lock(&priv->mutex);
9035 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9037 /* In IBSS mode, we need to notify the firmware to update
9038 * the beacon info after we changed the capability. */
9039 if (cap != priv->capability &&
9040 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9041 priv->status & STATUS_ASSOCIATED)
9042 ipw_disassociate(priv);
9044 mutex_unlock(&priv->mutex);
9045 return ret;
9048 static int ipw_wx_get_encode(struct net_device *dev,
9049 struct iw_request_info *info,
9050 union iwreq_data *wrqu, char *key)
9052 struct ipw_priv *priv = ieee80211_priv(dev);
9053 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9056 static int ipw_wx_set_power(struct net_device *dev,
9057 struct iw_request_info *info,
9058 union iwreq_data *wrqu, char *extra)
9060 struct ipw_priv *priv = ieee80211_priv(dev);
9061 int err;
9062 mutex_lock(&priv->mutex);
9063 if (wrqu->power.disabled) {
9064 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9065 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9066 if (err) {
9067 IPW_DEBUG_WX("failed setting power mode.\n");
9068 mutex_unlock(&priv->mutex);
9069 return err;
9071 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9072 mutex_unlock(&priv->mutex);
9073 return 0;
9076 switch (wrqu->power.flags & IW_POWER_MODE) {
9077 case IW_POWER_ON: /* If not specified */
9078 case IW_POWER_MODE: /* If set all mask */
9079 case IW_POWER_ALL_R: /* If explicitly set to all */
9080 break;
9081 default: /* Otherwise we don't support it */
9082 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9083 wrqu->power.flags);
9084 mutex_unlock(&priv->mutex);
9085 return -EOPNOTSUPP;
9088 /* If the user hasn't specified a power management mode yet, default
9089 * to BATTERY */
9090 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9091 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9092 else
9093 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9094 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9095 if (err) {
9096 IPW_DEBUG_WX("failed setting power mode.\n");
9097 mutex_unlock(&priv->mutex);
9098 return err;
9101 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9102 mutex_unlock(&priv->mutex);
9103 return 0;
9106 static int ipw_wx_get_power(struct net_device *dev,
9107 struct iw_request_info *info,
9108 union iwreq_data *wrqu, char *extra)
9110 struct ipw_priv *priv = ieee80211_priv(dev);
9111 mutex_lock(&priv->mutex);
9112 if (!(priv->power_mode & IPW_POWER_ENABLED))
9113 wrqu->power.disabled = 1;
9114 else
9115 wrqu->power.disabled = 0;
9117 mutex_unlock(&priv->mutex);
9118 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9120 return 0;
9123 static int ipw_wx_set_powermode(struct net_device *dev,
9124 struct iw_request_info *info,
9125 union iwreq_data *wrqu, char *extra)
9127 struct ipw_priv *priv = ieee80211_priv(dev);
9128 int mode = *(int *)extra;
9129 int err;
9130 mutex_lock(&priv->mutex);
9131 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9132 mode = IPW_POWER_AC;
9133 priv->power_mode = mode;
9134 } else {
9135 priv->power_mode = IPW_POWER_ENABLED | mode;
9138 if (priv->power_mode != mode) {
9139 err = ipw_send_power_mode(priv, mode);
9141 if (err) {
9142 IPW_DEBUG_WX("failed setting power mode.\n");
9143 mutex_unlock(&priv->mutex);
9144 return err;
9147 mutex_unlock(&priv->mutex);
9148 return 0;
9151 #define MAX_WX_STRING 80
9152 static int ipw_wx_get_powermode(struct net_device *dev,
9153 struct iw_request_info *info,
9154 union iwreq_data *wrqu, char *extra)
9156 struct ipw_priv *priv = ieee80211_priv(dev);
9157 int level = IPW_POWER_LEVEL(priv->power_mode);
9158 char *p = extra;
9160 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9162 switch (level) {
9163 case IPW_POWER_AC:
9164 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9165 break;
9166 case IPW_POWER_BATTERY:
9167 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9168 break;
9169 default:
9170 p += snprintf(p, MAX_WX_STRING - (p - extra),
9171 "(Timeout %dms, Period %dms)",
9172 timeout_duration[level - 1] / 1000,
9173 period_duration[level - 1] / 1000);
9176 if (!(priv->power_mode & IPW_POWER_ENABLED))
9177 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9179 wrqu->data.length = p - extra + 1;
9181 return 0;
9184 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9185 struct iw_request_info *info,
9186 union iwreq_data *wrqu, char *extra)
9188 struct ipw_priv *priv = ieee80211_priv(dev);
9189 int mode = *(int *)extra;
9190 u8 band = 0, modulation = 0;
9192 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9193 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9194 return -EINVAL;
9196 mutex_lock(&priv->mutex);
9197 if (priv->adapter == IPW_2915ABG) {
9198 priv->ieee->abg_true = 1;
9199 if (mode & IEEE_A) {
9200 band |= IEEE80211_52GHZ_BAND;
9201 modulation |= IEEE80211_OFDM_MODULATION;
9202 } else
9203 priv->ieee->abg_true = 0;
9204 } else {
9205 if (mode & IEEE_A) {
9206 IPW_WARNING("Attempt to set 2200BG into "
9207 "802.11a mode\n");
9208 mutex_unlock(&priv->mutex);
9209 return -EINVAL;
9212 priv->ieee->abg_true = 0;
9215 if (mode & IEEE_B) {
9216 band |= IEEE80211_24GHZ_BAND;
9217 modulation |= IEEE80211_CCK_MODULATION;
9218 } else
9219 priv->ieee->abg_true = 0;
9221 if (mode & IEEE_G) {
9222 band |= IEEE80211_24GHZ_BAND;
9223 modulation |= IEEE80211_OFDM_MODULATION;
9224 } else
9225 priv->ieee->abg_true = 0;
9227 priv->ieee->mode = mode;
9228 priv->ieee->freq_band = band;
9229 priv->ieee->modulation = modulation;
9230 init_supported_rates(priv, &priv->rates);
9232 /* Network configuration changed -- force [re]association */
9233 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9234 if (!ipw_disassociate(priv)) {
9235 ipw_send_supported_rates(priv, &priv->rates);
9236 ipw_associate(priv);
9239 /* Update the band LEDs */
9240 ipw_led_band_on(priv);
9242 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9243 mode & IEEE_A ? 'a' : '.',
9244 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9245 mutex_unlock(&priv->mutex);
9246 return 0;
9249 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9250 struct iw_request_info *info,
9251 union iwreq_data *wrqu, char *extra)
9253 struct ipw_priv *priv = ieee80211_priv(dev);
9254 mutex_lock(&priv->mutex);
9255 switch (priv->ieee->mode) {
9256 case IEEE_A:
9257 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9258 break;
9259 case IEEE_B:
9260 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9261 break;
9262 case IEEE_A | IEEE_B:
9263 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9264 break;
9265 case IEEE_G:
9266 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9267 break;
9268 case IEEE_A | IEEE_G:
9269 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9270 break;
9271 case IEEE_B | IEEE_G:
9272 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9273 break;
9274 case IEEE_A | IEEE_B | IEEE_G:
9275 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9276 break;
9277 default:
9278 strncpy(extra, "unknown", MAX_WX_STRING);
9279 break;
9282 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9284 wrqu->data.length = strlen(extra) + 1;
9285 mutex_unlock(&priv->mutex);
9287 return 0;
9290 static int ipw_wx_set_preamble(struct net_device *dev,
9291 struct iw_request_info *info,
9292 union iwreq_data *wrqu, char *extra)
9294 struct ipw_priv *priv = ieee80211_priv(dev);
9295 int mode = *(int *)extra;
9296 mutex_lock(&priv->mutex);
9297 /* Switching from SHORT -> LONG requires a disassociation */
9298 if (mode == 1) {
9299 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9300 priv->config |= CFG_PREAMBLE_LONG;
9302 /* Network configuration changed -- force [re]association */
9303 IPW_DEBUG_ASSOC
9304 ("[re]association triggered due to preamble change.\n");
9305 if (!ipw_disassociate(priv))
9306 ipw_associate(priv);
9308 goto done;
9311 if (mode == 0) {
9312 priv->config &= ~CFG_PREAMBLE_LONG;
9313 goto done;
9315 mutex_unlock(&priv->mutex);
9316 return -EINVAL;
9318 done:
9319 mutex_unlock(&priv->mutex);
9320 return 0;
9323 static int ipw_wx_get_preamble(struct net_device *dev,
9324 struct iw_request_info *info,
9325 union iwreq_data *wrqu, char *extra)
9327 struct ipw_priv *priv = ieee80211_priv(dev);
9328 mutex_lock(&priv->mutex);
9329 if (priv->config & CFG_PREAMBLE_LONG)
9330 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9331 else
9332 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9333 mutex_unlock(&priv->mutex);
9334 return 0;
9337 #ifdef CONFIG_IPW2200_MONITOR
9338 static int ipw_wx_set_monitor(struct net_device *dev,
9339 struct iw_request_info *info,
9340 union iwreq_data *wrqu, char *extra)
9342 struct ipw_priv *priv = ieee80211_priv(dev);
9343 int *parms = (int *)extra;
9344 int enable = (parms[0] > 0);
9345 mutex_lock(&priv->mutex);
9346 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9347 if (enable) {
9348 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9349 #ifdef CONFIG_IEEE80211_RADIOTAP
9350 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9351 #else
9352 priv->net_dev->type = ARPHRD_IEEE80211;
9353 #endif
9354 queue_work(priv->workqueue, &priv->adapter_restart);
9357 ipw_set_channel(priv, parms[1]);
9358 } else {
9359 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9360 mutex_unlock(&priv->mutex);
9361 return 0;
9363 priv->net_dev->type = ARPHRD_ETHER;
9364 queue_work(priv->workqueue, &priv->adapter_restart);
9366 mutex_unlock(&priv->mutex);
9367 return 0;
9370 #endif /* CONFIG_IPW2200_MONITOR */
9372 static int ipw_wx_reset(struct net_device *dev,
9373 struct iw_request_info *info,
9374 union iwreq_data *wrqu, char *extra)
9376 struct ipw_priv *priv = ieee80211_priv(dev);
9377 IPW_DEBUG_WX("RESET\n");
9378 queue_work(priv->workqueue, &priv->adapter_restart);
9379 return 0;
9382 static int ipw_wx_sw_reset(struct net_device *dev,
9383 struct iw_request_info *info,
9384 union iwreq_data *wrqu, char *extra)
9386 struct ipw_priv *priv = ieee80211_priv(dev);
9387 union iwreq_data wrqu_sec = {
9388 .encoding = {
9389 .flags = IW_ENCODE_DISABLED,
9392 int ret;
9394 IPW_DEBUG_WX("SW_RESET\n");
9396 mutex_lock(&priv->mutex);
9398 ret = ipw_sw_reset(priv, 0);
9399 if (!ret) {
9400 free_firmware();
9401 ipw_adapter_restart(priv);
9404 /* The SW reset bit might have been toggled on by the 'disable'
9405 * module parameter, so take appropriate action */
9406 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9408 mutex_unlock(&priv->mutex);
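/* Disable any configured encryption keys (IW_ENCODE_DISABLED); the
 * driver mutex is dropped around the call into the ieee80211 layer. */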
9409 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9410 mutex_lock(&priv->mutex);
9412 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9413 /* Configuration likely changed -- force [re]association */
9414 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9415 "reset.\n");
9416 if (!ipw_disassociate(priv))
9417 ipw_associate(priv);
9420 mutex_unlock(&priv->mutex);
9422 return 0;
9425 /* Rebase the WE IOCTLs to zero for the handler array */
9426 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9427 static iw_handler ipw_wx_handlers[] = {
9428 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9429 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9430 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9431 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9432 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9433 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9434 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9435 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9436 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9437 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9438 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9439 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9440 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9441 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9442 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9443 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9444 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9445 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9446 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9447 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9448 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9449 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9450 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9451 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9452 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9453 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9454 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9455 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9456 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9457 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9458 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9459 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9460 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9461 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9462 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9463 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9464 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9465 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9466 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9469 enum {
9470 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9471 IPW_PRIV_GET_POWER,
9472 IPW_PRIV_SET_MODE,
9473 IPW_PRIV_GET_MODE,
9474 IPW_PRIV_SET_PREAMBLE,
9475 IPW_PRIV_GET_PREAMBLE,
9476 IPW_PRIV_RESET,
9477 IPW_PRIV_SW_RESET,
9478 #ifdef CONFIG_IPW2200_MONITOR
9479 IPW_PRIV_SET_MONITOR,
9480 #endif
9483 static struct iw_priv_args ipw_priv_args[] = {
9485 .cmd = IPW_PRIV_SET_POWER,
9486 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9487 .name = "set_power"},
9489 .cmd = IPW_PRIV_GET_POWER,
9490 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9491 .name = "get_power"},
9493 .cmd = IPW_PRIV_SET_MODE,
9494 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9495 .name = "set_mode"},
9497 .cmd = IPW_PRIV_GET_MODE,
9498 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9499 .name = "get_mode"},
9501 .cmd = IPW_PRIV_SET_PREAMBLE,
9502 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9503 .name = "set_preamble"},
9505 .cmd = IPW_PRIV_GET_PREAMBLE,
9506 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9507 .name = "get_preamble"},
9509 IPW_PRIV_RESET,
9510 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9512 IPW_PRIV_SW_RESET,
9513 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9514 #ifdef CONFIG_IPW2200_MONITOR
9516 IPW_PRIV_SET_MONITOR,
9517 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9518 #endif /* CONFIG_IPW2200_MONITOR */
9521 static iw_handler ipw_priv_handler[] = {
9522 ipw_wx_set_powermode,
9523 ipw_wx_get_powermode,
9524 ipw_wx_set_wireless_mode,
9525 ipw_wx_get_wireless_mode,
9526 ipw_wx_set_preamble,
9527 ipw_wx_get_preamble,
9528 ipw_wx_reset,
9529 ipw_wx_sw_reset,
9530 #ifdef CONFIG_IPW2200_MONITOR
9531 ipw_wx_set_monitor,
9532 #endif
9535 static struct iw_handler_def ipw_wx_handler_def = {
9536 .standard = ipw_wx_handlers,
9537 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9538 .num_private = ARRAY_SIZE(ipw_priv_handler),
9539 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9540 .private = ipw_priv_handler,
9541 .private_args = ipw_priv_args,
9542 .get_wireless_stats = ipw_get_wireless_stats,
9546 * Get wireless statistics.
9547 * Called by /proc/net/wireless
9548 * Also called by SIOCGIWSTATS
9550 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9552 struct ipw_priv *priv = ieee80211_priv(dev);
9553 struct iw_statistics *wstats;
9555 wstats = &priv->wstats;
9557 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9558 * netdev->get_wireless_stats seems to be called before fw is
9559 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9560 * and associated; if not associated, the values are all meaningless
9561 * anyway, so set them all to NULL and INVALID */
9562 if (!(priv->status & STATUS_ASSOCIATED)) {
9563 wstats->miss.beacon = 0;
9564 wstats->discard.retries = 0;
9565 wstats->qual.qual = 0;
9566 wstats->qual.level = 0;
9567 wstats->qual.noise = 0;
9568 wstats->qual.updated = 7;
9569 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9570 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9571 return wstats;
9574 wstats->qual.qual = priv->quality;
9575 wstats->qual.level = average_value(&priv->average_rssi);
9576 wstats->qual.noise = average_value(&priv->average_noise);
9577 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9578 IW_QUAL_NOISE_UPDATED;
9580 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9581 wstats->discard.retries = priv->last_tx_failures;
9582 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9584 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9585 goto fail_get_ordinal;
9586 wstats->discard.retries += tx_retry; */
9588 return wstats;
9591 /* net device stuff */
9593 static void init_sys_config(struct ipw_sys_config *sys_config)
9595 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9596 sys_config->bt_coexistence = 0;
9597 sys_config->answer_broadcast_ssid_probe = 0;
9598 sys_config->accept_all_data_frames = 0;
9599 sys_config->accept_non_directed_frames = 1;
9600 sys_config->exclude_unicast_unencrypted = 0;
9601 sys_config->disable_unicast_decryption = 1;
9602 sys_config->exclude_multicast_unencrypted = 0;
9603 sys_config->disable_multicast_decryption = 1;
9604 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
9605 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9606 sys_config->dot11g_auto_detection = 0;
9607 sys_config->enable_cts_to_self = 0;
9608 sys_config->bt_coexist_collision_thr = 0;
9609 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
9612 static int ipw_net_open(struct net_device *dev)
9614 struct ipw_priv *priv = ieee80211_priv(dev);
9615 IPW_DEBUG_INFO("dev->open\n");
9616 /* we should be verifying the device is ready to be opened */
9617 mutex_lock(&priv->mutex);
9618 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9619 (priv->status & STATUS_ASSOCIATED))
9620 netif_start_queue(dev);
9621 mutex_unlock(&priv->mutex);
9622 return 0;
9625 static int ipw_net_stop(struct net_device *dev)
9627 IPW_DEBUG_INFO("dev->close\n");
9628 netif_stop_queue(dev);
9629 return 0;
9633 TODO:
9635 Modify to send one TFD per fragment instead of using chunking; otherwise
9636 we need to heavily modify ieee80211_skb_to_txb().
9639 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9640 int pri)
9642 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9643 txb->fragments[0]->data;
9644 int i = 0;
9645 struct tfd_frame *tfd;
9646 #ifdef CONFIG_IPW_QOS
9647 int tx_id = ipw_get_tx_queue_number(priv, pri);
9648 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9649 #else
9650 struct clx2_tx_queue *txq = &priv->txq[0];
9651 #endif
9652 struct clx2_queue *q = &txq->q;
9653 u8 id, hdr_len, unicast;
9654 u16 remaining_bytes;
9655 int fc;
9657 /* If there isn't room in the queue, we return busy and let the
9658 * network stack requeue the packet for us */
9659 if (ipw_queue_space(q) < q->high_mark)
9660 return NETDEV_TX_BUSY;
9662 switch (priv->ieee->iw_mode) {
9663 case IW_MODE_ADHOC:
9664 hdr_len = IEEE80211_3ADDR_LEN;
9665 unicast = !is_multicast_ether_addr(hdr->addr1);
9666 id = ipw_find_station(priv, hdr->addr1);
9667 if (id == IPW_INVALID_STATION) {
9668 id = ipw_add_station(priv, hdr->addr1);
9669 if (id == IPW_INVALID_STATION) {
9670 IPW_WARNING("Attempt to send data to "
9671 "invalid cell: " MAC_FMT "\n",
9672 MAC_ARG(hdr->addr1));
9673 goto drop;
9676 break;
9678 case IW_MODE_INFRA:
9679 default:
9680 unicast = !is_multicast_ether_addr(hdr->addr3);
9681 hdr_len = IEEE80211_3ADDR_LEN;
9682 id = 0;
9683 break;
9686 tfd = &txq->bd[q->first_empty];
9687 txq->txb[q->first_empty] = txb;
9688 memset(tfd, 0, sizeof(*tfd));
9689 tfd->u.data.station_number = id;
9691 tfd->control_flags.message_type = TX_FRAME_TYPE;
9692 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9694 tfd->u.data.cmd_id = DINO_CMD_TX;
9695 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9696 remaining_bytes = txb->payload_size;
9698 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9699 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9700 else
9701 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9703 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9704 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9706 fc = le16_to_cpu(hdr->frame_ctl);
9707 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9709 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9711 if (likely(unicast))
9712 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9714 if (txb->encrypted && !priv->ieee->host_encrypt) {
9715 switch (priv->ieee->sec.level) {
9716 case SEC_LEVEL_3:
9717 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9718 IEEE80211_FCTL_PROTECTED;
9719 /* XXX: ACK flag must be set for CCMP even if it
9720 * is a multicast/broadcast packet, because CCMP
9721 * group communication encrypted by GTK is
9722 * actually done by the AP. */
9723 if (!unicast)
9724 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9726 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9727 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9728 tfd->u.data.key_index = 0;
9729 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9730 break;
9731 case SEC_LEVEL_2:
9732 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9733 IEEE80211_FCTL_PROTECTED;
9734 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9735 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9736 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9737 break;
9738 case SEC_LEVEL_1:
9739 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9740 IEEE80211_FCTL_PROTECTED;
9741 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9742 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9744 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9745 else
9746 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9747 break;
9748 case SEC_LEVEL_0:
9749 break;
9750 default:
9751 printk(KERN_ERR "Unknown security level %d\n",
9752 priv->ieee->sec.level);
9753 break;
9755 } else
9756 /* No hardware encryption */
9757 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9759 #ifdef CONFIG_IPW_QOS
9760 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9761 #endif /* CONFIG_IPW_QOS */
9763 /* payload */
9764 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9765 txb->nr_frags));
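/* At most NUM_TFD_CHUNKS - 2 fragments are mapped directly into the
 * TFD; any fragments left over are coalesced into one extra chunk
 * below. */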
9766 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9767 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9768 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9769 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9770 i, le32_to_cpu(tfd->u.data.num_chunks),
9771 txb->fragments[i]->len - hdr_len);
9772 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9773 i, tfd->u.data.num_chunks,
9774 txb->fragments[i]->len - hdr_len);
9775 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9776 txb->fragments[i]->len - hdr_len);
9778 tfd->u.data.chunk_ptr[i] =
9779 cpu_to_le32(pci_map_single
9780 (priv->pci_dev,
9781 txb->fragments[i]->data + hdr_len,
9782 txb->fragments[i]->len - hdr_len,
9783 PCI_DMA_TODEVICE));
9784 tfd->u.data.chunk_len[i] =
9785 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9788 if (i != txb->nr_frags) {
9789 struct sk_buff *skb;
9790 u16 remaining_bytes = 0;
9791 int j;
9793 for (j = i; j < txb->nr_frags; j++)
9794 remaining_bytes += txb->fragments[j]->len - hdr_len;
9796 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9797 remaining_bytes);
9798 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9799 if (skb != NULL) {
9800 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9801 for (j = i; j < txb->nr_frags; j++) {
9802 int size = txb->fragments[j]->len - hdr_len;
9804 printk(KERN_INFO "Adding frag %d %d...\n",
9805 j, size);
9806 memcpy(skb_put(skb, size),
9807 txb->fragments[j]->data + hdr_len, size);
9809 dev_kfree_skb_any(txb->fragments[i]);
9810 txb->fragments[i] = skb;
9811 tfd->u.data.chunk_ptr[i] =
9812 cpu_to_le32(pci_map_single
9813 (priv->pci_dev, skb->data,
9814 tfd->u.data.chunk_len[i],
9815 PCI_DMA_TODEVICE));
9817 tfd->u.data.num_chunks =
9818 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9823 /* kick DMA */
9824 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9825 ipw_write32(priv, q->reg_w, q->first_empty);
9827 return NETDEV_TX_OK;
9829 drop:
9830 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9831 ieee80211_txb_free(txb);
9832 return NETDEV_TX_OK;
9835 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9837 struct ipw_priv *priv = ieee80211_priv(dev);
9838 #ifdef CONFIG_IPW_QOS
9839 int tx_id = ipw_get_tx_queue_number(priv, pri);
9840 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9841 #else
9842 struct clx2_tx_queue *txq = &priv->txq[0];
9843 #endif /* CONFIG_IPW_QOS */
9845 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9846 return 1;
9848 return 0;
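/* hard_start_xmit hook registered with the ieee80211 layer: takes
 * priv->lock, stops the net queue and rejects the frame while we are not
 * associated, otherwise hands the txb to ipw_tx_skb() and pulses the
 * activity LED on success. */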
9851 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9852 struct net_device *dev, int pri)
9854 struct ipw_priv *priv = ieee80211_priv(dev);
9855 unsigned long flags;
9856 int ret;
9858 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9859 spin_lock_irqsave(&priv->lock, flags);
9861 if (!(priv->status & STATUS_ASSOCIATED)) {
9862 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9863 priv->ieee->stats.tx_carrier_errors++;
9864 netif_stop_queue(dev);
9865 goto fail_unlock;
9868 ret = ipw_tx_skb(priv, txb, pri);
9869 if (ret == NETDEV_TX_OK)
9870 __ipw_led_activity_on(priv);
9871 spin_unlock_irqrestore(&priv->lock, flags);
9873 return ret;
9875 fail_unlock:
9876 spin_unlock_irqrestore(&priv->lock, flags);
9877 return 1;
9880 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9882 struct ipw_priv *priv = ieee80211_priv(dev);
9884 priv->ieee->stats.tx_packets = priv->tx_packets;
9885 priv->ieee->stats.rx_packets = priv->rx_packets;
9886 return &priv->ieee->stats;
9889 static void ipw_net_set_multicast_list(struct net_device *dev)
9894 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9896 struct ipw_priv *priv = ieee80211_priv(dev);
9897 struct sockaddr *addr = p;
9898 if (!is_valid_ether_addr(addr->sa_data))
9899 return -EADDRNOTAVAIL;
9900 mutex_lock(&priv->mutex);
9901 priv->config |= CFG_CUSTOM_MAC;
9902 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9903 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9904 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9905 queue_work(priv->workqueue, &priv->adapter_restart);
9906 mutex_unlock(&priv->mutex);
9907 return 0;
9910 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9911 struct ethtool_drvinfo *info)
9913 struct ipw_priv *p = ieee80211_priv(dev);
9914 char vers[64];
9915 char date[32];
9916 u32 len;
9918 strcpy(info->driver, DRV_NAME);
9919 strcpy(info->version, DRV_VERSION);
9921 len = sizeof(vers);
9922 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9923 len = sizeof(date);
9924 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9926 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9927 vers, date);
9928 strcpy(info->bus_info, pci_name(p->pci_dev));
9929 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9932 static u32 ipw_ethtool_get_link(struct net_device *dev)
9934 struct ipw_priv *priv = ieee80211_priv(dev);
9935 return (priv->status & STATUS_ASSOCIATED) != 0;
9938 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
9940 return IPW_EEPROM_IMAGE_SIZE;
9943 static int ipw_ethtool_get_eeprom(struct net_device *dev,
9944 struct ethtool_eeprom *eeprom, u8 * bytes)
9946 struct ipw_priv *p = ieee80211_priv(dev);
9948 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9949 return -EINVAL;
9950 mutex_lock(&p->mutex);
9951 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9952 mutex_unlock(&p->mutex);
9953 return 0;
9956 static int ipw_ethtool_set_eeprom(struct net_device *dev,
9957 struct ethtool_eeprom *eeprom, u8 * bytes)
9959 struct ipw_priv *p = ieee80211_priv(dev);
9960 int i;
9962 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9963 return -EINVAL;
9964 mutex_lock(&p->mutex);
9965 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9966 for (i = IPW_EEPROM_DATA;
9967 i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++)
9968 ipw_write8(p, i, p->eeprom[i]);
9969 mutex_unlock(&p->mutex);
9970 return 0;
9973 static struct ethtool_ops ipw_ethtool_ops = {
9974 .get_link = ipw_ethtool_get_link,
9975 .get_drvinfo = ipw_ethtool_get_drvinfo,
9976 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
9977 .get_eeprom = ipw_ethtool_get_eeprom,
9978 .set_eeprom = ipw_ethtool_set_eeprom,
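/* Top-half interrupt handler: read and acknowledge INTA, disable further
 * interrupts from the device, cache the status bits in priv->isr_inta and
 * defer the real work to the irq tasklet. */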
9981 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
9983 struct ipw_priv *priv = data;
9984 u32 inta, inta_mask;
9986 if (!priv)
9987 return IRQ_NONE;
9989 spin_lock(&priv->lock);
9991 if (!(priv->status & STATUS_INT_ENABLED)) {
9992 /* Shared IRQ */
9993 goto none;
9996 inta = ipw_read32(priv, IPW_INTA_RW);
9997 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
9999 if (inta == 0xFFFFFFFF) {
10000 /* Hardware disappeared */
10001 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10002 goto none;
10005 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10006 /* Shared interrupt */
10007 goto none;
10010 /* tell the device to stop sending interrupts */
10011 ipw_disable_interrupts(priv);
10013 /* ack current interrupts */
10014 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10015 ipw_write32(priv, IPW_INTA_RW, inta);
10017 /* Cache INTA value for our tasklet */
10018 priv->isr_inta = inta;
10020 tasklet_schedule(&priv->irq_tasklet);
10022 spin_unlock(&priv->lock);
10024 return IRQ_HANDLED;
10025 none:
10026 spin_unlock(&priv->lock);
10027 return IRQ_NONE;
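/* Poll the hardware RF kill switch: while it is still asserted, re-check
 * in two seconds; once it clears (and software RF kill is off), schedule
 * an adapter restart to bring the device back up. */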
10030 static void ipw_rf_kill(void *adapter)
10032 struct ipw_priv *priv = adapter;
10033 unsigned long flags;
10035 spin_lock_irqsave(&priv->lock, flags);
10037 if (rf_kill_active(priv)) {
10038 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10039 if (priv->workqueue)
10040 queue_delayed_work(priv->workqueue,
10041 &priv->rf_kill, 2 * HZ);
10042 goto exit_unlock;
10045 /* RF Kill is now disabled, so bring the device back up */
10047 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10048 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10049 "device\n");
10051 /* we cannot do an adapter restart while inside an irq lock */
10052 queue_work(priv->workqueue, &priv->adapter_restart);
10053 } else
10054 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10055 "enabled\n");
10057 exit_unlock:
10058 spin_unlock_irqrestore(&priv->lock, flags);
10061 static void ipw_bg_rf_kill(void *data)
10063 struct ipw_priv *priv = data;
10064 mutex_lock(&priv->mutex);
10065 ipw_rf_kill(data);
10066 mutex_unlock(&priv->mutex);
10069 static void ipw_link_up(struct ipw_priv *priv)
10071 priv->last_seq_num = -1;
10072 priv->last_frag_num = -1;
10073 priv->last_packet_time = 0;
10075 netif_carrier_on(priv->net_dev);
10076 if (netif_queue_stopped(priv->net_dev)) {
10077 IPW_DEBUG_NOTIF("waking queue\n");
10078 netif_wake_queue(priv->net_dev);
10079 } else {
10080 IPW_DEBUG_NOTIF("starting queue\n");
10081 netif_start_queue(priv->net_dev);
10084 cancel_delayed_work(&priv->request_scan);
10085 ipw_reset_stats(priv);
10086 /* Ensure the rate is updated immediately */
10087 priv->last_rate = ipw_get_current_rate(priv);
10088 ipw_gather_stats(priv);
10089 ipw_led_link_up(priv);
10090 notify_wx_assoc_event(priv);
10092 if (priv->config & CFG_BACKGROUND_SCAN)
10093 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10096 static void ipw_bg_link_up(void *data)
10098 struct ipw_priv *priv = data;
10099 mutex_lock(&priv->mutex);
10100 ipw_link_up(data);
10101 mutex_unlock(&priv->mutex);
10104 static void ipw_link_down(struct ipw_priv *priv)
10106 ipw_led_link_down(priv);
10107 netif_carrier_off(priv->net_dev);
10108 netif_stop_queue(priv->net_dev);
10109 notify_wx_assoc_event(priv);
10111 /* Cancel any queued work ... */
10112 cancel_delayed_work(&priv->request_scan);
10113 cancel_delayed_work(&priv->adhoc_check);
10114 cancel_delayed_work(&priv->gather_stats);
10116 ipw_reset_stats(priv);
10118 if (!(priv->status & STATUS_EXIT_PENDING)) {
10119 /* Queue up another scan... */
10120 queue_work(priv->workqueue, &priv->request_scan);
10124 static void ipw_bg_link_down(void *data)
10126 struct ipw_priv *priv = data;
10127 mutex_lock(&priv->mutex);
10128 ipw_link_down(data);
10129 mutex_unlock(&priv->mutex);
10132 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10134 int ret = 0;
10136 priv->workqueue = create_workqueue(DRV_NAME);
10137 init_waitqueue_head(&priv->wait_command_queue);
10138 init_waitqueue_head(&priv->wait_state);
10140 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10141 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10142 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10143 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10144 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10145 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10146 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10147 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10148 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10149 INIT_WORK(&priv->request_scan,
10150 (void (*)(void *))ipw_request_scan, priv);
10151 INIT_WORK(&priv->gather_stats,
10152 (void (*)(void *))ipw_bg_gather_stats, priv);
10153 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10154 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10155 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10156 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10157 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10158 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10159 priv);
10160 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10161 priv);
10162 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10163 priv);
10164 INIT_WORK(&priv->merge_networks,
10165 (void (*)(void *))ipw_merge_adhoc_network, priv);
10167 #ifdef CONFIG_IPW_QOS
10168 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10169 priv);
10170 #endif /* CONFIG_IPW_QOS */
10172 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10173 ipw_irq_tasklet, (unsigned long)priv);
10175 return ret;
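/* Callback the ieee80211 layer uses to push security settings (WEP keys,
 * active key index, auth mode, privacy level) into our private copy and
 * flag them for upload to the firmware. */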
10178 static void shim__set_security(struct net_device *dev,
10179 struct ieee80211_security *sec)
10181 struct ipw_priv *priv = ieee80211_priv(dev);
10182 int i;
10183 for (i = 0; i < 4; i++) {
10184 if (sec->flags & (1 << i)) {
10185 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10186 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10187 if (sec->key_sizes[i] == 0)
10188 priv->ieee->sec.flags &= ~(1 << i);
10189 else {
10190 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10191 sec->key_sizes[i]);
10192 priv->ieee->sec.flags |= (1 << i);
10194 priv->status |= STATUS_SECURITY_UPDATED;
10195 } else if (sec->level != SEC_LEVEL_1)
10196 priv->ieee->sec.flags &= ~(1 << i);
10199 if (sec->flags & SEC_ACTIVE_KEY) {
10200 if (sec->active_key <= 3) {
10201 priv->ieee->sec.active_key = sec->active_key;
10202 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10203 } else
10204 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10205 priv->status |= STATUS_SECURITY_UPDATED;
10206 } else
10207 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10209 if ((sec->flags & SEC_AUTH_MODE) &&
10210 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10211 priv->ieee->sec.auth_mode = sec->auth_mode;
10212 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10213 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10214 priv->capability |= CAP_SHARED_KEY;
10215 else
10216 priv->capability &= ~CAP_SHARED_KEY;
10217 priv->status |= STATUS_SECURITY_UPDATED;
10220 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10221 priv->ieee->sec.flags |= SEC_ENABLED;
10222 priv->ieee->sec.enabled = sec->enabled;
10223 priv->status |= STATUS_SECURITY_UPDATED;
10224 if (sec->enabled)
10225 priv->capability |= CAP_PRIVACY_ON;
10226 else
10227 priv->capability &= ~CAP_PRIVACY_ON;
10230 if (sec->flags & SEC_ENCRYPT)
10231 priv->ieee->sec.encrypt = sec->encrypt;
10233 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10234 priv->ieee->sec.level = sec->level;
10235 priv->ieee->sec.flags |= SEC_LEVEL;
10236 priv->status |= STATUS_SECURITY_UPDATED;
10239 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10240 ipw_set_hwcrypto_keys(priv);
10242 /* To match the current functionality of ipw2100 (which works well with
10243 * various supplicants), we don't force a disassociation if the
10244 * privacy capability changes ... */
10245 #if 0
10246 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10247 (((priv->assoc_request.capability &
10248 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10249 (!(priv->assoc_request.capability &
10250 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10251 IPW_DEBUG_ASSOC("Disassociating due to capability "
10252 "change.\n");
10253 ipw_disassociate(priv);
10255 #endif
10258 static int init_supported_rates(struct ipw_priv *priv,
10259 struct ipw_supported_rates *rates)
10261 /* TODO: Mask out rates based on priv->rates_mask */
10263 memset(rates, 0, sizeof(*rates));
10264 /* configure supported rates */
10265 switch (priv->ieee->freq_band) {
10266 case IEEE80211_52GHZ_BAND:
10267 rates->ieee_mode = IPW_A_MODE;
10268 rates->purpose = IPW_RATE_CAPABILITIES;
10269 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10270 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10271 break;
10273 default: /* Mixed or 2.4GHz */
10274 rates->ieee_mode = IPW_G_MODE;
10275 rates->purpose = IPW_RATE_CAPABILITIES;
10276 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10277 IEEE80211_CCK_DEFAULT_RATES_MASK);
10278 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10279 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10280 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10282 break;
10285 return 0;
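/* Push the post-firmware-load configuration to the adapter: tx power,
 * adapter address, system config, supported rates, RTS threshold, QoS
 * parameters, random seed and the final host-complete command that moves
 * the firmware into the RUN state. */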
10288 static int ipw_config(struct ipw_priv *priv)
10290 /* This is only called from ipw_up, which resets/reloads the firmware,
10291 so we don't need to disable the card before we configure
10292 it */
10293 if (ipw_set_tx_power(priv))
10294 goto error;
10296 /* initialize adapter address */
10297 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10298 goto error;
10300 /* set basic system config settings */
10301 init_sys_config(&priv->sys_config);
10303 /* Support Bluetooth if we have BT h/w on board and the user wants to.
10304 * Does not support BT priority yet (don't abort or defer our Tx) */
10305 if (bt_coexist) {
10306 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10308 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10309 priv->sys_config.bt_coexistence
10310 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10311 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10312 priv->sys_config.bt_coexistence
10313 |= CFG_BT_COEXISTENCE_OOB;
10316 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10317 priv->sys_config.answer_broadcast_ssid_probe = 1;
10318 else
10319 priv->sys_config.answer_broadcast_ssid_probe = 0;
10321 if (ipw_send_system_config(priv, &priv->sys_config))
10322 goto error;
10324 init_supported_rates(priv, &priv->rates);
10325 if (ipw_send_supported_rates(priv, &priv->rates))
10326 goto error;
10328 /* Set request-to-send threshold */
10329 if (priv->rts_threshold) {
10330 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10331 goto error;
10333 #ifdef CONFIG_IPW_QOS
10334 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10335 ipw_qos_activate(priv, NULL);
10336 #endif /* CONFIG_IPW_QOS */
10338 if (ipw_set_random_seed(priv))
10339 goto error;
10341 /* final state transition to the RUN state */
10342 if (ipw_send_host_complete(priv))
10343 goto error;
10345 priv->status |= STATUS_INIT;
10347 ipw_led_init(priv);
10348 ipw_led_radio_on(priv);
10349 priv->notif_missed_beacons = 0;
10351 /* Set hardware WEP key if it is configured. */
10352 if ((priv->capability & CAP_PRIVACY_ON) &&
10353 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10354 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10355 ipw_set_hwcrypto_keys(priv);
10357 return 0;
10359 error:
10360 return -EIO;
10364 * NOTE:
10366 * These tables have been tested in conjunction with the
10367 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10369 * Altering these values, using them on other hardware, or using them
10370 * in geographies not intended for resale of the above-mentioned Intel
10371 * adapters has not been tested.
10374 static const struct ieee80211_geo ipw_geos[] = {
10375 { /* Restricted */
10376 "---",
10377 .bg_channels = 11,
10378 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10379 {2427, 4}, {2432, 5}, {2437, 6},
10380 {2442, 7}, {2447, 8}, {2452, 9},
10381 {2457, 10}, {2462, 11}},
10384 { /* Custom US/Canada */
10385 "ZZF",
10386 .bg_channels = 11,
10387 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10388 {2427, 4}, {2432, 5}, {2437, 6},
10389 {2442, 7}, {2447, 8}, {2452, 9},
10390 {2457, 10}, {2462, 11}},
10391 .a_channels = 8,
10392 .a = {{5180, 36},
10393 {5200, 40},
10394 {5220, 44},
10395 {5240, 48},
10396 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10397 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10398 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10399 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10402 { /* Rest of World */
10403 "ZZD",
10404 .bg_channels = 13,
10405 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10406 {2427, 4}, {2432, 5}, {2437, 6},
10407 {2442, 7}, {2447, 8}, {2452, 9},
10408 {2457, 10}, {2462, 11}, {2467, 12},
10409 {2472, 13}},
10412 { /* Custom USA & Europe & High */
10413 "ZZA",
10414 .bg_channels = 11,
10415 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10416 {2427, 4}, {2432, 5}, {2437, 6},
10417 {2442, 7}, {2447, 8}, {2452, 9},
10418 {2457, 10}, {2462, 11}},
10419 .a_channels = 13,
10420 .a = {{5180, 36},
10421 {5200, 40},
10422 {5220, 44},
10423 {5240, 48},
10424 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10425 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10426 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10427 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10428 {5745, 149},
10429 {5765, 153},
10430 {5785, 157},
10431 {5805, 161},
10432 {5825, 165}},
10435 { /* Custom NA & Europe */
10436 "ZZB",
10437 .bg_channels = 11,
10438 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10439 {2427, 4}, {2432, 5}, {2437, 6},
10440 {2442, 7}, {2447, 8}, {2452, 9},
10441 {2457, 10}, {2462, 11}},
10442 .a_channels = 13,
10443 .a = {{5180, 36},
10444 {5200, 40},
10445 {5220, 44},
10446 {5240, 48},
10447 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10448 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10449 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10450 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10451 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10452 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10453 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10454 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10455 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10458 { /* Custom Japan */
10459 "ZZC",
10460 .bg_channels = 11,
10461 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10462 {2427, 4}, {2432, 5}, {2437, 6},
10463 {2442, 7}, {2447, 8}, {2452, 9},
10464 {2457, 10}, {2462, 11}},
10465 .a_channels = 4,
10466 .a = {{5170, 34}, {5190, 38},
10467 {5210, 42}, {5230, 46}},
10470 { /* Custom */
10471 "ZZM",
10472 .bg_channels = 11,
10473 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10474 {2427, 4}, {2432, 5}, {2437, 6},
10475 {2442, 7}, {2447, 8}, {2452, 9},
10476 {2457, 10}, {2462, 11}},
10479 { /* Europe */
10480 "ZZE",
10481 .bg_channels = 13,
10482 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10483 {2427, 4}, {2432, 5}, {2437, 6},
10484 {2442, 7}, {2447, 8}, {2452, 9},
10485 {2457, 10}, {2462, 11}, {2467, 12},
10486 {2472, 13}},
10487 .a_channels = 19,
10488 .a = {{5180, 36},
10489 {5200, 40},
10490 {5220, 44},
10491 {5240, 48},
10492 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10493 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10494 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10495 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10496 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10497 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10498 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10499 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10500 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10501 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10502 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10503 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10504 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10505 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10506 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10509 { /* Custom Japan */
10510 "ZZJ",
10511 .bg_channels = 14,
10512 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10513 {2427, 4}, {2432, 5}, {2437, 6},
10514 {2442, 7}, {2447, 8}, {2452, 9},
10515 {2457, 10}, {2462, 11}, {2467, 12},
10516 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10517 .a_channels = 4,
10518 .a = {{5170, 34}, {5190, 38},
10519 {5210, 42}, {5230, 46}},
10522 { /* Rest of World */
10523 "ZZR",
10524 .bg_channels = 14,
10525 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10526 {2427, 4}, {2432, 5}, {2437, 6},
10527 {2442, 7}, {2447, 8}, {2452, 9},
10528 {2457, 10}, {2462, 11}, {2467, 12},
10529 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10530 IEEE80211_CH_PASSIVE_ONLY}},
10533 { /* High Band */
10534 "ZZH",
10535 .bg_channels = 13,
10536 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10537 {2427, 4}, {2432, 5}, {2437, 6},
10538 {2442, 7}, {2447, 8}, {2452, 9},
10539 {2457, 10}, {2462, 11},
10540 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10541 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10542 .a_channels = 4,
10543 .a = {{5745, 149}, {5765, 153},
10544 {5785, 157}, {5805, 161}},
10547 { /* Custom Europe */
10548 "ZZG",
10549 .bg_channels = 13,
10550 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10551 {2427, 4}, {2432, 5}, {2437, 6},
10552 {2442, 7}, {2447, 8}, {2452, 9},
10553 {2457, 10}, {2462, 11},
10554 {2467, 12}, {2472, 13}},
10555 .a_channels = 4,
10556 .a = {{5180, 36}, {5200, 40},
10557 {5220, 44}, {5240, 48}},
10560 { /* Europe */
10561 "ZZK",
10562 .bg_channels = 13,
10563 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10564 {2427, 4}, {2432, 5}, {2437, 6},
10565 {2442, 7}, {2447, 8}, {2452, 9},
10566 {2457, 10}, {2462, 11},
10567 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10568 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10569 .a_channels = 24,
10570 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10571 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10572 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10573 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10574 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10575 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10576 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10577 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10578 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10579 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10580 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10581 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10582 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10583 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10584 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10585 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10586 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10587 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10588 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10589 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10590 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10591 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10592 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10593 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10596 { /* Europe */
10597 "ZZL",
10598 .bg_channels = 11,
10599 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10600 {2427, 4}, {2432, 5}, {2437, 6},
10601 {2442, 7}, {2447, 8}, {2452, 9},
10602 {2457, 10}, {2462, 11}},
10603 .a_channels = 13,
10604 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10605 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10606 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10607 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10608 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10609 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10610 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10611 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10612 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10613 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10614 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10615 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10616 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10620 /* GEO code borrowed from ieee80211_geo.c */
10621 static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
10623 int i;
10625 /* Driver needs to initialize the geography map before using
10626 * these helper functions */
10627 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10629 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10630 for (i = 0; i < ieee->geo.bg_channels; i++)
10631 /* NOTE: If G mode is currently supported but
10632 * this is a B only channel, we don't see it
10633 * as valid. */
10634 if ((ieee->geo.bg[i].channel == channel) &&
10635 (!(ieee->mode & IEEE_G) ||
10636 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
10637 return IEEE80211_24GHZ_BAND;
10639 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10640 for (i = 0; i < ieee->geo.a_channels; i++)
10641 if (ieee->geo.a[i].channel == channel)
10642 return IEEE80211_52GHZ_BAND;
10644 return 0;
10647 static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel)
10649 int i;
10651 /* Driver needs to initialize the geography map before using
10652 * these helper functions */
10653 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10655 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10656 for (i = 0; i < ieee->geo.bg_channels; i++)
10657 if (ieee->geo.bg[i].channel == channel)
10658 return i;
10660 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10661 for (i = 0; i < ieee->geo.a_channels; i++)
10662 if (ieee->geo.a[i].channel == channel)
10663 return i;
10665 return -1;
10668 static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq)
10670 int i;
10672 /* Driver needs to initialize the geography map before using
10673 * these helper functions */
10674 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10676 freq /= 100000;
10678 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10679 for (i = 0; i < ieee->geo.bg_channels; i++)
10680 if (ieee->geo.bg[i].freq == freq)
10681 return ieee->geo.bg[i].channel;
10683 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10684 for (i = 0; i < ieee->geo.a_channels; i++)
10685 if (ieee->geo.a[i].freq == freq)
10686 return ieee->geo.a[i].channel;
10688 return 0;
10691 static int ipw_set_geo(struct ieee80211_device *ieee,
10692 const struct ieee80211_geo *geo)
10694 memcpy(ieee->geo.name, geo->name, 3);
10695 ieee->geo.name[3] = '\0';
10696 ieee->geo.bg_channels = geo->bg_channels;
10697 ieee->geo.a_channels = geo->a_channels;
10698 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
10699 sizeof(struct ieee80211_channel));
10700 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
10701 sizeof(struct ieee80211_channel));
10702 return 0;
10705 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee)
10707 return &ieee->geo;
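/* Bring the adapter up: allocate the optional command log, load the
 * firmware (retrying up to MAX_HW_RESTARTS times), program the MAC
 * address, pick the geography table matching the EEPROM country code,
 * honour RF kill, and finally run ipw_config(). */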
10710 #define MAX_HW_RESTARTS 5
10711 static int ipw_up(struct ipw_priv *priv)
10713 int rc, i, j;
10715 if (priv->status & STATUS_EXIT_PENDING)
10716 return -EIO;
10718 if (cmdlog && !priv->cmdlog) {
10719 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10720 GFP_KERNEL);
10721 if (priv->cmdlog == NULL) {
10722 IPW_ERROR("Error allocating %d command log entries.\n",
10723 cmdlog);
10724 } else {
10725 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10726 priv->cmdlog_len = cmdlog;
10730 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10731 /* Load the microcode, firmware, and eeprom.
10732 * Also start the clocks. */
10733 rc = ipw_load(priv);
10734 if (rc) {
10735 IPW_ERROR("Unable to load firmware: %d\n", rc);
10736 return rc;
10739 ipw_init_ordinals(priv);
10740 if (!(priv->config & CFG_CUSTOM_MAC))
10741 eeprom_parse_mac(priv, priv->mac_addr);
10742 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10744 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10745 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10746 ipw_geos[j].name, 3))
10747 break;
10749 if (j == ARRAY_SIZE(ipw_geos)) {
10750 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10751 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10752 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10753 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10754 j = 0;
10756 if (ipw_set_geo(priv->ieee, &ipw_geos[j])) {
10757 IPW_WARNING("Could not set geography.");
10758 return 0;
10761 IPW_DEBUG_INFO("Geography %03d [%s] detected.\n",
10762 j, priv->ieee->geo.name);
10764 if (priv->status & STATUS_RF_KILL_SW) {
10765 IPW_WARNING("Radio disabled by module parameter.\n");
10766 return 0;
10767 } else if (rf_kill_active(priv)) {
10768 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10769 "Kill switch must be turned off for "
10770 "wireless networking to work.\n");
10771 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10772 2 * HZ);
10773 return 0;
10776 rc = ipw_config(priv);
10777 if (!rc) {
10778 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10780 /* If configured to try to auto-associate, kick
10781 * off a scan. */
10782 queue_work(priv->workqueue, &priv->request_scan);
10784 return 0;
10787 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10788 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10789 i, MAX_HW_RESTARTS);
10791 /* We had an error bringing up the hardware, so take it
10792 * all the way back down so we can try again */
10793 ipw_down(priv);
10796 /* We tried to restart and configure the device for as long as our
10797 * patience could withstand. */
10798 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10800 return -EIO;
10803 static void ipw_bg_up(void *data)
10805 struct ipw_priv *priv = data;
10806 mutex_lock(&priv->mutex);
10807 ipw_up(data);
10808 mutex_unlock(&priv->mutex);
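/* Quiesce the adapter: abort any scan in progress, disassociate, wait up
 * to 1s for those state bits to clear, then ask the firmware to disable
 * the card. */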
10811 static void ipw_deinit(struct ipw_priv *priv)
10813 int i;
10815 if (priv->status & STATUS_SCANNING) {
10816 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10817 ipw_abort_scan(priv);
10820 if (priv->status & STATUS_ASSOCIATED) {
10821 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10822 ipw_disassociate(priv);
10825 ipw_led_shutdown(priv);
10827 /* Wait up to 1s for the status to change to not scanning and not
10828 * associated (disassociation can take a while for a full 802.11
10829 * exchange). */
10830 for (i = 1000; i && (priv->status &
10831 (STATUS_DISASSOCIATING |
10832 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10833 udelay(10);
10835 if (priv->status & (STATUS_DISASSOCIATING |
10836 STATUS_ASSOCIATED | STATUS_SCANNING))
10837 IPW_DEBUG_INFO("Still associated or scanning...\n");
10838 else
10839 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10841 /* Attempt to disable the card */
10842 ipw_send_card_disable(priv, 0);
10844 priv->status &= ~STATUS_INIT;
10847 static void ipw_down(struct ipw_priv *priv)
10849 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10851 priv->status |= STATUS_EXIT_PENDING;
10853 if (ipw_is_init(priv))
10854 ipw_deinit(priv);
10856 /* Wipe out the EXIT_PENDING status bit if we are not actually
10857 * exiting the module */
10858 if (!exit_pending)
10859 priv->status &= ~STATUS_EXIT_PENDING;
10861 /* tell the device to stop sending interrupts */
10862 ipw_disable_interrupts(priv);
10864 /* Clear all bits but the RF Kill */
10865 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10866 netif_carrier_off(priv->net_dev);
10867 netif_stop_queue(priv->net_dev);
10869 ipw_stop_nic(priv);
10871 ipw_led_radio_off(priv);
10874 static void ipw_bg_down(void *data)
10876 struct ipw_priv *priv = data;
10877 mutex_lock(&priv->mutex);
10878 ipw_down(data);
10879 mutex_unlock(&priv->mutex);
10882 /* Called by register_netdev() */
10883 static int ipw_net_init(struct net_device *dev)
10885 struct ipw_priv *priv = ieee80211_priv(dev);
10886 mutex_lock(&priv->mutex);
10888 if (ipw_up(priv)) {
10889 mutex_unlock(&priv->mutex);
10890 return -EIO;
10893 mutex_unlock(&priv->mutex);
10894 return 0;
10897 /* PCI driver stuff */
10898 static struct pci_device_id card_ids[] = {
10899 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10900 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10901 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10902 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10903 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10904 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10905 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10906 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10907 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10908 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10909 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10910 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10911 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10912 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10913 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10914 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10915 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10916 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10917 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10918 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10919 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10920 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10922 /* required last entry */
10923 {0,}
10926 MODULE_DEVICE_TABLE(pci, card_ids);
10928 static struct attribute *ipw_sysfs_entries[] = {
10929 &dev_attr_rf_kill.attr,
10930 &dev_attr_direct_dword.attr,
10931 &dev_attr_indirect_byte.attr,
10932 &dev_attr_indirect_dword.attr,
10933 &dev_attr_mem_gpio_reg.attr,
10934 &dev_attr_command_event_reg.attr,
10935 &dev_attr_nic_type.attr,
10936 &dev_attr_status.attr,
10937 &dev_attr_cfg.attr,
10938 &dev_attr_error.attr,
10939 &dev_attr_event_log.attr,
10940 &dev_attr_cmd_log.attr,
10941 &dev_attr_eeprom_delay.attr,
10942 &dev_attr_ucode_version.attr,
10943 &dev_attr_rtc.attr,
10944 &dev_attr_scan_age.attr,
10945 &dev_attr_led.attr,
10946 &dev_attr_speed_scan.attr,
10947 &dev_attr_net_stats.attr,
10948 NULL
10951 static struct attribute_group ipw_attribute_group = {
10952 .name = NULL, /* put in device directory */
10953 .attrs = ipw_sysfs_entries,
10956 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10958 int err = 0;
10959 struct net_device *net_dev;
10960 void __iomem *base;
10961 u32 length, val;
10962 struct ipw_priv *priv;
10963 int i;
10965 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
10966 if (net_dev == NULL) {
10967 err = -ENOMEM;
10968 goto out;
10971 priv = ieee80211_priv(net_dev);
10972 priv->ieee = netdev_priv(net_dev);
10974 priv->net_dev = net_dev;
10975 priv->pci_dev = pdev;
10976 #ifdef CONFIG_IPW2200_DEBUG
10977 ipw_debug_level = debug;
10978 #endif
10979 spin_lock_init(&priv->lock);
10980 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10981 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10983 mutex_init(&priv->mutex);
10984 if (pci_enable_device(pdev)) {
10985 err = -ENODEV;
10986 goto out_free_ieee80211;
10989 pci_set_master(pdev);
10991 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10992 if (!err)
10993 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
10994 if (err) {
10995 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
10996 goto out_pci_disable_device;
10999 pci_set_drvdata(pdev, priv);
11001 err = pci_request_regions(pdev, DRV_NAME);
11002 if (err)
11003 goto out_pci_disable_device;
11005 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11006 * PCI Tx retries from interfering with C3 CPU state */
11007 pci_read_config_dword(pdev, 0x40, &val);
11008 if ((val & 0x0000ff00) != 0)
11009 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11011 length = pci_resource_len(pdev, 0);
11012 priv->hw_len = length;
11014 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11015 if (!base) {
11016 err = -ENODEV;
11017 goto out_pci_release_regions;
11020 priv->hw_base = base;
11021 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11022 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11024 err = ipw_setup_deferred_work(priv);
11025 if (err) {
11026 IPW_ERROR("Unable to setup deferred work\n");
11027 goto out_iounmap;
11030 ipw_sw_reset(priv, 1);
11032 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
11033 if (err) {
11034 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11035 goto out_destroy_workqueue;
11038 SET_MODULE_OWNER(net_dev);
11039 SET_NETDEV_DEV(net_dev, &pdev->dev);
11041 mutex_lock(&priv->mutex);
11043 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11044 priv->ieee->set_security = shim__set_security;
11045 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11047 #ifdef CONFIG_IPW_QOS
11048 priv->ieee->handle_probe_response = ipw_handle_beacon;
11049 priv->ieee->handle_beacon = ipw_handle_probe_response;
11050 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11051 #endif /* CONFIG_IPW_QOS */
11053 priv->ieee->perfect_rssi = -20;
11054 priv->ieee->worst_rssi = -85;
11056 net_dev->open = ipw_net_open;
11057 net_dev->stop = ipw_net_stop;
11058 net_dev->init = ipw_net_init;
11059 net_dev->get_stats = ipw_net_get_stats;
11060 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11061 net_dev->set_mac_address = ipw_net_set_mac_address;
11062 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11063 net_dev->wireless_data = &priv->wireless_data;
11064 net_dev->wireless_handlers = &ipw_wx_handler_def;
11065 net_dev->ethtool_ops = &ipw_ethtool_ops;
11066 net_dev->irq = pdev->irq;
11067 net_dev->base_addr = (unsigned long)priv->hw_base;
11068 net_dev->mem_start = pci_resource_start(pdev, 0);
11069 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11071 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11072 if (err) {
11073 IPW_ERROR("failed to create sysfs device attributes\n");
11074 mutex_unlock(&priv->mutex);
11075 goto out_release_irq;
11078 mutex_unlock(&priv->mutex);
11079 err = register_netdev(net_dev);
11080 if (err) {
11081 IPW_ERROR("failed to register network device\n");
11082 goto out_remove_sysfs;
11084 return 0;
11086 out_remove_sysfs:
11087 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11088 out_release_irq:
11089 free_irq(pdev->irq, priv);
11090 out_destroy_workqueue:
11091 destroy_workqueue(priv->workqueue);
11092 priv->workqueue = NULL;
11093 out_iounmap:
11094 iounmap(priv->hw_base);
11095 out_pci_release_regions:
11096 pci_release_regions(pdev);
11097 out_pci_disable_device:
11098 pci_disable_device(pdev);
11099 pci_set_drvdata(pdev, NULL);
11100 out_free_ieee80211:
11101 free_ieee80211(priv->net_dev);
11102 out:
11103 return err;
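/* Undo everything ipw_pci_probe() set up: take the adapter down, remove
 * the sysfs group, unregister the netdev, free the rx/tx queues, command
 * log, deferred work, the IBSS MAC hash, the IRQ and the PCI resources. */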
11106 static void ipw_pci_remove(struct pci_dev *pdev)
11108 struct ipw_priv *priv = pci_get_drvdata(pdev);
11109 struct list_head *p, *q;
11110 int i;
11112 if (!priv)
11113 return;
11115 mutex_lock(&priv->mutex);
11117 priv->status |= STATUS_EXIT_PENDING;
11118 ipw_down(priv);
11119 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11121 mutex_unlock(&priv->mutex);
11123 unregister_netdev(priv->net_dev);
11125 if (priv->rxq) {
11126 ipw_rx_queue_free(priv, priv->rxq);
11127 priv->rxq = NULL;
11129 ipw_tx_queue_free(priv);
11131 if (priv->cmdlog) {
11132 kfree(priv->cmdlog);
11133 priv->cmdlog = NULL;
11135 /* ipw_down will ensure that there is no more pending work
11136 * in the workqueues, so we can safely remove them now. */
11137 cancel_delayed_work(&priv->adhoc_check);
11138 cancel_delayed_work(&priv->gather_stats);
11139 cancel_delayed_work(&priv->request_scan);
11140 cancel_delayed_work(&priv->rf_kill);
11141 cancel_delayed_work(&priv->scan_check);
11142 destroy_workqueue(priv->workqueue);
11143 priv->workqueue = NULL;
11145 /* Free MAC hash list for ADHOC */
11146 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11147 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11148 list_del(p);
11149 kfree(list_entry(p, struct ipw_ibss_seq, list));
11153 if (priv->error) {
11154 ipw_free_error_log(priv->error);
11155 priv->error = NULL;
11158 free_irq(pdev->irq, priv);
11159 iounmap(priv->hw_base);
11160 pci_release_regions(pdev);
11161 pci_disable_device(pdev);
11162 pci_set_drvdata(pdev, NULL);
11163 free_ieee80211(priv->net_dev);
11164 free_firmware();
11167 #ifdef CONFIG_PM
11168 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11170 struct ipw_priv *priv = pci_get_drvdata(pdev);
11171 struct net_device *dev = priv->net_dev;
11173 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11175 /* Take down the device; power it off, etc. */
11176 ipw_down(priv);
11178 /* Remove the PRESENT state of the device */
11179 netif_device_detach(dev);
11181 pci_save_state(pdev);
11182 pci_disable_device(pdev);
11183 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11185 return 0;
11188 static int ipw_pci_resume(struct pci_dev *pdev)
11190 struct ipw_priv *priv = pci_get_drvdata(pdev);
11191 struct net_device *dev = priv->net_dev;
11192 u32 val;
11194 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11196 pci_set_power_state(pdev, PCI_D0);
11197 pci_enable_device(pdev);
11198 pci_restore_state(pdev);
11201 * Suspend/Resume resets the PCI configuration space, so we have to
11202 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11203 * from interfering with C3 CPU state. pci_restore_state won't help
11204 * here since it only restores the first 64 bytes of the PCI config header.
11206 pci_read_config_dword(pdev, 0x40, &val);
11207 if ((val & 0x0000ff00) != 0)
11208 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11210 /* Set the device back into the PRESENT state; this will also wake
11211 * the queue if needed */
11212 netif_device_attach(dev);
11214 /* Bring the device back up */
11215 queue_work(priv->workqueue, &priv->up);
11217 return 0;
11219 #endif
11221 /* driver initialization stuff */
11222 static struct pci_driver ipw_driver = {
11223 .name = DRV_NAME,
11224 .id_table = card_ids,
11225 .probe = ipw_pci_probe,
11226 .remove = __devexit_p(ipw_pci_remove),
11227 #ifdef CONFIG_PM
11228 .suspend = ipw_pci_suspend,
11229 .resume = ipw_pci_resume,
11230 #endif
11233 static int __init ipw_init(void)
11235 int ret;
11237 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11238 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11240 ret = pci_module_init(&ipw_driver);
11241 if (ret) {
11242 IPW_ERROR("Unable to initialize PCI module\n");
11243 return ret;
11246 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11247 if (ret) {
11248 IPW_ERROR("Unable to create driver sysfs file\n");
11249 pci_unregister_driver(&ipw_driver);
11250 return ret;
11253 return ret;
11256 static void __exit ipw_exit(void)
11258 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11259 pci_unregister_driver(&ipw_driver);
11262 module_param(disable, int, 0444);
11263 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11265 module_param(associate, int, 0444);
11266 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11268 module_param(auto_create, int, 0444);
11269 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11271 module_param(led, int, 0444);
11272 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 [off])");
11274 module_param(debug, int, 0444);
11275 MODULE_PARM_DESC(debug, "debug output mask");
11277 module_param(channel, int, 0444);
11278 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11280 #ifdef CONFIG_IPW_QOS
11281 module_param(qos_enable, int, 0444);
11282 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11284 module_param(qos_burst_enable, int, 0444);
11285 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11287 module_param(qos_no_ack_mask, int, 0444);
11288 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11290 module_param(burst_duration_CCK, int, 0444);
11291 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11293 module_param(burst_duration_OFDM, int, 0444);
11294 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11295 #endif /* CONFIG_IPW_QOS */
11297 #ifdef CONFIG_IPW2200_MONITOR
11298 module_param(mode, int, 0444);
11299 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11300 #else
11301 module_param(mode, int, 0444);
11302 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11303 #endif
11305 module_param(bt_coexist, int, 0444);
11306 MODULE_PARM_DESC(bt_coexist, "enable Bluetooth coexistence (default off)");
11308 module_param(hwcrypto, int, 0444);
11309 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11311 module_param(cmdlog, int, 0444);
11312 MODULE_PARM_DESC(cmdlog,
11313 "allocate a ring buffer for logging firmware commands");
11315 module_param(roaming, int, 0444);
11316 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11318 module_exit(ipw_exit);
11319 module_init(ipw_init);