1 /******************************************************************************
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
36 #define IPW2200_VERSION "git-1.0.10"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int bt_coexist = 0;
59 static int hwcrypto = 0;
60 static int roaming = 1;
61 static const char ipw_modes[] = {
62 'a', 'b', 'g', '?'
65 #ifdef CONFIG_IPW_QOS
66 static int qos_enable = 0;
67 static int qos_burst_enable = 0;
68 static int qos_no_ack_mask = 0;
69 static int burst_duration_CCK = 0;
70 static int burst_duration_OFDM = 0;
72 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
73 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
74 QOS_TX3_CW_MIN_OFDM},
75 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
76 QOS_TX3_CW_MAX_OFDM},
77 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
78 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
79 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
80 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
83 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
84 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
85 QOS_TX3_CW_MIN_CCK},
86 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
87 QOS_TX3_CW_MAX_CCK},
88 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
89 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
90 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
91 QOS_TX3_TXOP_LIMIT_CCK}
94 static struct ieee80211_qos_parameters def_parameters_OFDM = {
95 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
96 DEF_TX3_CW_MIN_OFDM},
97 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
98 DEF_TX3_CW_MAX_OFDM},
99 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
100 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
101 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
102 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
105 static struct ieee80211_qos_parameters def_parameters_CCK = {
106 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
107 DEF_TX3_CW_MIN_CCK},
108 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
109 DEF_TX3_CW_MAX_CCK},
110 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
111 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
112 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
113 DEF_TX3_TXOP_LIMIT_CCK}
116 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
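/* Map 802.1d traffic priority (0-7) to one of the four hardware TX queues */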
118 static int from_priority_to_tx_queue[] = {
119 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
120 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
123 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
125 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
126 *qos_param);
127 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
128 *qos_param);
129 #endif /* CONFIG_IPW_QOS */
131 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
132 static void ipw_remove_current_network(struct ipw_priv *priv);
133 static void ipw_rx(struct ipw_priv *priv);
134 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
135 struct clx2_tx_queue *txq, int qindex);
136 static int ipw_queue_reset(struct ipw_priv *priv);
138 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
139 int len, int sync);
141 static void ipw_tx_queue_free(struct ipw_priv *);
143 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
144 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
145 static void ipw_rx_queue_replenish(void *);
146 static int ipw_up(struct ipw_priv *);
147 static void ipw_bg_up(void *);
148 static void ipw_down(struct ipw_priv *);
149 static void ipw_bg_down(void *);
150 static int ipw_config(struct ipw_priv *);
151 static int init_supported_rates(struct ipw_priv *priv,
152 struct ipw_supported_rates *prates);
153 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
154 static void ipw_send_wep_keys(struct ipw_priv *, int);
156 static int ipw_is_valid_channel(struct ieee80211_device *, u8);
157 static int ipw_channel_to_index(struct ieee80211_device *, u8);
158 static u8 ipw_freq_to_channel(struct ieee80211_device *, u32);
159 static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *);
160 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *);
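/* Format a single hex-dump line into 'buf': the offset, up to 16 data bytes
 * as hex (two groups of eight), then the same bytes as printable ASCII */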
162 static int snprint_line(char *buf, size_t count,
163 const u8 * data, u32 len, u32 ofs)
165 int out, i, j, l;
166 char c;
168 out = snprintf(buf, count, "%08X", ofs);
170 for (l = 0, i = 0; i < 2; i++) {
171 out += snprintf(buf + out, count - out, " ");
172 for (j = 0; j < 8 && l < len; j++, l++)
173 out += snprintf(buf + out, count - out, "%02X ",
174 data[(i * 8 + j)]);
175 for (; j < 8; j++)
176 out += snprintf(buf + out, count - out, " ");
179 out += snprintf(buf + out, count - out, " ");
180 for (l = 0, i = 0; i < 2; i++) {
181 out += snprintf(buf + out, count - out, " ");
182 for (j = 0; j < 8 && l < len; j++, l++) {
183 c = data[(i * 8 + j)];
184 if (!isascii(c) || !isprint(c))
185 c = '.';
187 out += snprintf(buf + out, count - out, "%c", c);
190 for (; j < 8; j++)
191 out += snprintf(buf + out, count - out, " ");
194 return out;
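/* Hex dump a buffer to the kernel log, 16 bytes per line, but only if the
 * given debug level is enabled in ipw_debug_level */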
197 static void printk_buf(int level, const u8 * data, u32 len)
199 char line[81];
200 u32 ofs = 0;
201 if (!(ipw_debug_level & level))
202 return;
204 while (len) {
205 snprint_line(line, sizeof(line), &data[ofs],
206 min(len, 16U), ofs);
207 printk(KERN_DEBUG "%s\n", line);
208 ofs += 16;
209 len -= min(len, 16U);
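/* Hex dump a buffer into a caller-supplied string rather than the kernel
 * log; returns the total number of characters written */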
213 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
215 size_t out = size;
216 u32 ofs = 0;
217 int total = 0;
219 while (size && len) {
220 out = snprint_line(output, size, &data[ofs],
221 min_t(size_t, len, 16U), ofs);
223 ofs += 16;
224 output += out;
225 size -= out;
226 len -= min_t(size_t, len, 16U);
227 total += out;
229 return total;
232 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
233 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
234 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
236 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
237 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
238 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
240 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
241 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
242 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
244 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
245 __LINE__, (u32) (b), (u32) (c));
246 _ipw_write_reg8(a, b, c);
249 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
250 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
251 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
253 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
254 __LINE__, (u32) (b), (u32) (c));
255 _ipw_write_reg16(a, b, c);
258 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
259 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
260 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
262 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
263 __LINE__, (u32) (b), (u32) (c));
264 _ipw_write_reg32(a, b, c);
267 /* 8-bit direct write (low 4K) */
268 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
270 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
271 #define ipw_write8(ipw, ofs, val) \
272 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
273 _ipw_write8(ipw, ofs, val)
275 /* 16-bit direct write (low 4K) */
276 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
278 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
279 #define ipw_write16(ipw, ofs, val) \
280 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
281 _ipw_write16(ipw, ofs, val)
283 /* 32-bit direct write (low 4K) */
284 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
286 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
287 #define ipw_write32(ipw, ofs, val) \
288 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
289 _ipw_write32(ipw, ofs, val)
291 /* 8-bit direct read (low 4K) */
292 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
294 /* 8-bit direct read (low 4K), with debug wrapper */
295 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
297 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
298 return _ipw_read8(ipw, ofs);
301 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
302 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
304 /* 16-bit direct read (low 4K) */
305 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
307 /* 16-bit direct read (low 4K), with debug wrapper */
308 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
310 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
311 return _ipw_read16(ipw, ofs);
314 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
315 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
317 /* 32-bit direct read (low 4K) */
318 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
320 /* 32-bit direct read (low 4K), with debug wrapper */
321 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
323 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
324 return _ipw_read32(ipw, ofs);
327 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
328 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
330 /* multi-byte read (above 4K), with debug wrapper */
331 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
332 static inline void __ipw_read_indirect(const char *f, int l,
333 struct ipw_priv *a, u32 b, u8 * c, int d)
335 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
336 d);
337 _ipw_read_indirect(a, b, c, d);
340 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
341 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
343 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
344 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
345 int num);
346 #define ipw_write_indirect(a, b, c, d) \
347 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
348 _ipw_write_indirect(a, b, c, d)
350 /* 32-bit indirect write (above 4K) */
351 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
353 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
354 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
355 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
358 /* 8-bit indirect write (above 4K) */
359 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
361 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
362 u32 dif_len = reg - aligned_addr;
364 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
365 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
366 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
369 /* 16-bit indirect write (above 4K) */
370 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
372 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
373 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
375 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
376 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
377 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
380 /* 8-bit indirect read (above 4K) */
381 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
383 u32 word;
384 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
385 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
386 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
387 return (word >> ((reg & 0x3) * 8)) & 0xff;
390 /* 32-bit indirect read (above 4K) */
391 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
393 u32 value;
395 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
397 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
398 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
399 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
400 return value;
403 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
404 /* for area above 1st 4K of SRAM/reg space */
405 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
406 int num)
408 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
409 u32 dif_len = addr - aligned_addr;
410 u32 i;
412 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
414 if (num <= 0) {
415 return;
418 /* Read the first dword (or portion) byte by byte */
419 if (unlikely(dif_len)) {
420 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
421 /* Start reading at aligned_addr + dif_len */
422 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
423 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
424 aligned_addr += 4;
427 /* Read all of the middle dwords as dwords, with auto-increment */
428 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
429 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
430 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
432 /* Read the last dword (or portion) byte by byte */
433 if (unlikely(num)) {
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
435 for (i = 0; num > 0; i++, num--)
436 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
440 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
451 if (num <= 0) {
452 return;
455 /* Write the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start writing at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
460 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
461 aligned_addr += 4;
464 /* Write all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
469 /* Write the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--, buf++)
473 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for 1st 4K of SRAM/regs space */
479 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
480 int num)
482 memcpy_toio((priv->hw_base + addr), buf, num);
485 /* Set bit(s) in low 4K of SRAM/regs */
486 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
488 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
491 /* Clear bit(s) in low 4K of SRAM/regs */
492 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
494 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
497 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
499 if (priv->status & STATUS_INT_ENABLED)
500 return;
501 priv->status |= STATUS_INT_ENABLED;
502 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
505 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
507 if (!(priv->status & STATUS_INT_ENABLED))
508 return;
509 priv->status &= ~STATUS_INT_ENABLED;
510 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
513 #ifdef CONFIG_IPW2200_DEBUG
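/* Translate a firmware error code into a readable string for log dumps */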
514 static char *ipw_error_desc(u32 val)
516 switch (val) {
517 case IPW_FW_ERROR_OK:
518 return "ERROR_OK";
519 case IPW_FW_ERROR_FAIL:
520 return "ERROR_FAIL";
521 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
522 return "MEMORY_UNDERFLOW";
523 case IPW_FW_ERROR_MEMORY_OVERFLOW:
524 return "MEMORY_OVERFLOW";
525 case IPW_FW_ERROR_BAD_PARAM:
526 return "BAD_PARAM";
527 case IPW_FW_ERROR_BAD_CHECKSUM:
528 return "BAD_CHECKSUM";
529 case IPW_FW_ERROR_NMI_INTERRUPT:
530 return "NMI_INTERRUPT";
531 case IPW_FW_ERROR_BAD_DATABASE:
532 return "BAD_DATABASE";
533 case IPW_FW_ERROR_ALLOC_FAIL:
534 return "ALLOC_FAIL";
535 case IPW_FW_ERROR_DMA_UNDERRUN:
536 return "DMA_UNDERRUN";
537 case IPW_FW_ERROR_DMA_STATUS:
538 return "DMA_STATUS";
539 case IPW_FW_ERROR_DINO_ERROR:
540 return "DINO_ERROR";
541 case IPW_FW_ERROR_EEPROM_ERROR:
542 return "EEPROM_ERROR";
543 case IPW_FW_ERROR_SYSASSERT:
544 return "SYSASSERT";
545 case IPW_FW_ERROR_FATAL_ERROR:
546 return "FATAL_ERROR";
547 default:
548 return "UNKNOWN_ERROR";
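/* Print a captured firmware error log (status, config, error elements and
 * event log entries) through IPW_ERROR */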
552 static void ipw_dump_error_log(struct ipw_priv *priv,
553 struct ipw_fw_error *error)
555 u32 i;
557 if (!error) {
558 IPW_ERROR("Error allocating and capturing error log. "
559 "Nothing to dump.\n");
560 return;
563 IPW_ERROR("Start IPW Error Log Dump:\n");
564 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
565 error->status, error->config);
567 for (i = 0; i < error->elem_len; i++)
568 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
569 ipw_error_desc(error->elem[i].desc),
570 error->elem[i].time,
571 error->elem[i].blink1,
572 error->elem[i].blink2,
573 error->elem[i].link1,
574 error->elem[i].link2, error->elem[i].data);
575 for (i = 0; i < error->log_len; i++)
576 IPW_ERROR("%i\t0x%08x\t%i\n",
577 error->log[i].time,
578 error->log[i].data, error->log[i].event);
580 #endif
582 static inline int ipw_is_init(struct ipw_priv *priv)
584 return (priv->status & STATUS_INIT) ? 1 : 0;
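/* Read an ordinal (firmware statistic/parameter) into 'val'.  The table ID
 * encoded in 'ord' selects direct (table 0), indirect (table 1) or variable
 * sized (table 2) access; *len is set to the number of bytes returned */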
587 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
589 u32 addr, field_info, field_len, field_count, total_len;
591 IPW_DEBUG_ORD("ordinal = %i\n", ord);
593 if (!priv || !val || !len) {
594 IPW_DEBUG_ORD("Invalid argument\n");
595 return -EINVAL;
598 /* verify device ordinal tables have been initialized */
599 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
600 IPW_DEBUG_ORD("Access ordinals before initialization\n");
601 return -EINVAL;
604 switch (IPW_ORD_TABLE_ID_MASK & ord) {
605 case IPW_ORD_TABLE_0_MASK:
607 * TABLE 0: Direct access to a table of 32 bit values
609 * This is a very simple table with the data directly
610 * read from the table
613 /* remove the table id from the ordinal */
614 ord &= IPW_ORD_TABLE_VALUE_MASK;
616 /* boundary check */
617 if (ord > priv->table0_len) {
618 IPW_DEBUG_ORD("ordinal value (%i) larger than "
619 "max (%i)\n", ord, priv->table0_len);
620 return -EINVAL;
623 /* verify we have enough room to store the value */
624 if (*len < sizeof(u32)) {
625 IPW_DEBUG_ORD("ordinal buffer length too small, "
626 "need %zd\n", sizeof(u32));
627 return -EINVAL;
630 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
631 ord, priv->table0_addr + (ord << 2));
633 *len = sizeof(u32);
634 ord <<= 2;
635 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
636 break;
638 case IPW_ORD_TABLE_1_MASK:
640 * TABLE 1: Indirect access to a table of 32 bit values
642 * This is a fairly large table of u32 values each
643 * representing starting addr for the data (which is
644 * also a u32)
647 /* remove the table id from the ordinal */
648 ord &= IPW_ORD_TABLE_VALUE_MASK;
650 /* boundary check */
651 if (ord > priv->table1_len) {
652 IPW_DEBUG_ORD("ordinal value too long\n");
653 return -EINVAL;
656 /* verify we have enough room to store the value */
657 if (*len < sizeof(u32)) {
658 IPW_DEBUG_ORD("ordinal buffer length too small, "
659 "need %zd\n", sizeof(u32));
660 return -EINVAL;
663 *((u32 *) val) =
664 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
665 *len = sizeof(u32);
666 break;
668 case IPW_ORD_TABLE_2_MASK:
670 * TABLE 2: Indirect access to a table of variable sized values
672 * This table consists of six values, each containing
673 * - dword containing the starting offset of the data
674 * - dword containing the length in the first 16 bits
675 * and the count in the second 16 bits
678 /* remove the table id from the ordinal */
679 ord &= IPW_ORD_TABLE_VALUE_MASK;
681 /* boundary check */
682 if (ord > priv->table2_len) {
683 IPW_DEBUG_ORD("ordinal value too long\n");
684 return -EINVAL;
687 /* get the address of statistic */
688 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
690 /* get the second DW of statistics ;
691 * two 16-bit words - first is length, second is count */
692 field_info =
693 ipw_read_reg32(priv,
694 priv->table2_addr + (ord << 3) +
695 sizeof(u32));
697 /* get each entry length */
698 field_len = *((u16 *) & field_info);
700 /* get number of entries */
701 field_count = *(((u16 *) & field_info) + 1);
703 /* abort if there is not enough memory */
704 total_len = field_len * field_count;
705 if (total_len > *len) {
706 *len = total_len;
707 return -EINVAL;
710 *len = total_len;
711 if (!total_len)
712 return 0;
714 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
715 "field_info = 0x%08x\n",
716 addr, total_len, field_info);
717 ipw_read_indirect(priv, addr, val, total_len);
718 break;
720 default:
721 IPW_DEBUG_ORD("Invalid ordinal!\n");
722 return -EINVAL;
726 return 0;
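/* Cache the addresses and lengths of the three ordinal tables so that
 * ipw_get_ordinal() can validate and service lookups */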
729 static void ipw_init_ordinals(struct ipw_priv *priv)
731 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
732 priv->table0_len = ipw_read32(priv, priv->table0_addr);
734 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
735 priv->table0_addr, priv->table0_len);
737 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
738 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
740 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
741 priv->table1_addr, priv->table1_len);
743 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
744 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
745 priv->table2_len &= 0x0000ffff; /* use first two bytes */
747 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
748 priv->table2_addr, priv->table2_len);
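/* Strip the standby and DMA gating bits from an event register value
 * before it is written back for LED control */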
752 static u32 ipw_register_toggle(u32 reg)
754 reg &= ~IPW_START_STANDBY;
755 if (reg & IPW_GATE_ODMA)
756 reg &= ~IPW_GATE_ODMA;
757 if (reg & IPW_GATE_IDMA)
758 reg &= ~IPW_GATE_IDMA;
759 if (reg & IPW_GATE_ADMA)
760 reg &= ~IPW_GATE_ADMA;
761 return reg;
765 * LED behavior:
766 * - On radio ON, turn on any LEDs that need to be on during start
767 * - On initialization, start unassociated blink
768 * - On association, disable unassociated blink
769 * - On disassociation, start unassociated blink
770 * - On radio OFF, turn off any LEDs started during radio on
773 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
774 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
775 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
777 static void ipw_led_link_on(struct ipw_priv *priv)
779 unsigned long flags;
780 u32 led;
782 /* If configured to not use LEDs, or nic_type is 1,
783 * then we don't toggle a LINK led */
784 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
785 return;
787 spin_lock_irqsave(&priv->lock, flags);
789 if (!(priv->status & STATUS_RF_KILL_MASK) &&
790 !(priv->status & STATUS_LED_LINK_ON)) {
791 IPW_DEBUG_LED("Link LED On\n");
792 led = ipw_read_reg32(priv, IPW_EVENT_REG);
793 led |= priv->led_association_on;
795 led = ipw_register_toggle(led);
797 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
798 ipw_write_reg32(priv, IPW_EVENT_REG, led);
800 priv->status |= STATUS_LED_LINK_ON;
802 /* If we aren't associated, schedule turning the LED off */
803 if (!(priv->status & STATUS_ASSOCIATED))
804 queue_delayed_work(priv->workqueue,
805 &priv->led_link_off,
806 LD_TIME_LINK_ON);
809 spin_unlock_irqrestore(&priv->lock, flags);
812 static void ipw_bg_led_link_on(void *data)
814 struct ipw_priv *priv = data;
815 mutex_lock(&priv->mutex);
816 ipw_led_link_on(data);
817 mutex_unlock(&priv->mutex);
820 static void ipw_led_link_off(struct ipw_priv *priv)
822 unsigned long flags;
823 u32 led;
825 /* If configured not to use LEDs, or nic type is 1,
826 * then we don't toggle the LINK led. */
827 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
828 return;
830 spin_lock_irqsave(&priv->lock, flags);
832 if (priv->status & STATUS_LED_LINK_ON) {
833 led = ipw_read_reg32(priv, IPW_EVENT_REG);
834 led &= priv->led_association_off;
835 led = ipw_register_toggle(led);
837 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
838 ipw_write_reg32(priv, IPW_EVENT_REG, led);
840 IPW_DEBUG_LED("Link LED Off\n");
842 priv->status &= ~STATUS_LED_LINK_ON;
844 /* If we aren't associated and the radio is on, schedule
845 * turning the LED on (blink while unassociated) */
846 if (!(priv->status & STATUS_RF_KILL_MASK) &&
847 !(priv->status & STATUS_ASSOCIATED))
848 queue_delayed_work(priv->workqueue, &priv->led_link_on,
849 LD_TIME_LINK_OFF);
853 spin_unlock_irqrestore(&priv->lock, flags);
856 static void ipw_bg_led_link_off(void *data)
858 struct ipw_priv *priv = data;
859 mutex_lock(&priv->mutex);
860 ipw_led_link_off(data);
861 mutex_unlock(&priv->mutex);
864 static void __ipw_led_activity_on(struct ipw_priv *priv)
866 u32 led;
868 if (priv->config & CFG_NO_LED)
869 return;
871 if (priv->status & STATUS_RF_KILL_MASK)
872 return;
874 if (!(priv->status & STATUS_LED_ACT_ON)) {
875 led = ipw_read_reg32(priv, IPW_EVENT_REG);
876 led |= priv->led_activity_on;
878 led = ipw_register_toggle(led);
880 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
881 ipw_write_reg32(priv, IPW_EVENT_REG, led);
883 IPW_DEBUG_LED("Activity LED On\n");
885 priv->status |= STATUS_LED_ACT_ON;
887 cancel_delayed_work(&priv->led_act_off);
888 queue_delayed_work(priv->workqueue, &priv->led_act_off,
889 LD_TIME_ACT_ON);
890 } else {
891 /* Reschedule LED off for full time period */
892 cancel_delayed_work(&priv->led_act_off);
893 queue_delayed_work(priv->workqueue, &priv->led_act_off,
894 LD_TIME_ACT_ON);
898 #if 0
899 void ipw_led_activity_on(struct ipw_priv *priv)
901 unsigned long flags;
902 spin_lock_irqsave(&priv->lock, flags);
903 __ipw_led_activity_on(priv);
904 spin_unlock_irqrestore(&priv->lock, flags);
906 #endif /* 0 */
908 static void ipw_led_activity_off(struct ipw_priv *priv)
910 unsigned long flags;
911 u32 led;
913 if (priv->config & CFG_NO_LED)
914 return;
916 spin_lock_irqsave(&priv->lock, flags);
918 if (priv->status & STATUS_LED_ACT_ON) {
919 led = ipw_read_reg32(priv, IPW_EVENT_REG);
920 led &= priv->led_activity_off;
922 led = ipw_register_toggle(led);
924 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
925 ipw_write_reg32(priv, IPW_EVENT_REG, led);
927 IPW_DEBUG_LED("Activity LED Off\n");
929 priv->status &= ~STATUS_LED_ACT_ON;
932 spin_unlock_irqrestore(&priv->lock, flags);
935 static void ipw_bg_led_activity_off(void *data)
937 struct ipw_priv *priv = data;
938 mutex_lock(&priv->mutex);
939 ipw_led_activity_off(data);
940 mutex_unlock(&priv->mutex);
943 static void ipw_led_band_on(struct ipw_priv *priv)
945 unsigned long flags;
946 u32 led;
948 /* Only nic type 1 supports mode LEDs */
949 if (priv->config & CFG_NO_LED ||
950 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
951 return;
953 spin_lock_irqsave(&priv->lock, flags);
955 led = ipw_read_reg32(priv, IPW_EVENT_REG);
956 if (priv->assoc_network->mode == IEEE_A) {
957 led |= priv->led_ofdm_on;
958 led &= priv->led_association_off;
959 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
960 } else if (priv->assoc_network->mode == IEEE_G) {
961 led |= priv->led_ofdm_on;
962 led |= priv->led_association_on;
963 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
964 } else {
965 led &= priv->led_ofdm_off;
966 led |= priv->led_association_on;
967 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
970 led = ipw_register_toggle(led);
972 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
973 ipw_write_reg32(priv, IPW_EVENT_REG, led);
975 spin_unlock_irqrestore(&priv->lock, flags);
978 static void ipw_led_band_off(struct ipw_priv *priv)
980 unsigned long flags;
981 u32 led;
983 /* Only nic type 1 supports mode LEDs */
984 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
985 return;
987 spin_lock_irqsave(&priv->lock, flags);
989 led = ipw_read_reg32(priv, IPW_EVENT_REG);
990 led &= priv->led_ofdm_off;
991 led &= priv->led_association_off;
993 led = ipw_register_toggle(led);
995 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
996 ipw_write_reg32(priv, IPW_EVENT_REG, led);
998 spin_unlock_irqrestore(&priv->lock, flags);
1001 static void ipw_led_radio_on(struct ipw_priv *priv)
1003 ipw_led_link_on(priv);
1006 static void ipw_led_radio_off(struct ipw_priv *priv)
1008 ipw_led_activity_off(priv);
1009 ipw_led_link_off(priv);
1012 static void ipw_led_link_up(struct ipw_priv *priv)
1014 /* Set the Link Led on for all nic types */
1015 ipw_led_link_on(priv);
1018 static void ipw_led_link_down(struct ipw_priv *priv)
1020 ipw_led_activity_off(priv);
1021 ipw_led_link_off(priv);
1023 if (priv->status & STATUS_RF_KILL_MASK)
1024 ipw_led_radio_off(priv);
1027 static void ipw_led_init(struct ipw_priv *priv)
1029 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1031 /* Set the default PINs for the link and activity leds */
1032 priv->led_activity_on = IPW_ACTIVITY_LED;
1033 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1035 priv->led_association_on = IPW_ASSOCIATED_LED;
1036 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1038 /* Set the default PINs for the OFDM leds */
1039 priv->led_ofdm_on = IPW_OFDM_LED;
1040 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1042 switch (priv->nic_type) {
1043 case EEPROM_NIC_TYPE_1:
1044 /* In this NIC type, the LEDs are reversed.... */
1045 priv->led_activity_on = IPW_ASSOCIATED_LED;
1046 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1047 priv->led_association_on = IPW_ACTIVITY_LED;
1048 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1050 if (!(priv->config & CFG_NO_LED))
1051 ipw_led_band_on(priv);
1053 /* And we don't blink link LEDs for this nic, so
1054 * just return here */
1055 return;
1057 case EEPROM_NIC_TYPE_3:
1058 case EEPROM_NIC_TYPE_2:
1059 case EEPROM_NIC_TYPE_4:
1060 case EEPROM_NIC_TYPE_0:
1061 break;
1063 default:
1064 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1065 priv->nic_type);
1066 priv->nic_type = EEPROM_NIC_TYPE_0;
1067 break;
1070 if (!(priv->config & CFG_NO_LED)) {
1071 if (priv->status & STATUS_ASSOCIATED)
1072 ipw_led_link_on(priv);
1073 else
1074 ipw_led_link_off(priv);
1078 static void ipw_led_shutdown(struct ipw_priv *priv)
1080 ipw_led_activity_off(priv);
1081 ipw_led_link_off(priv);
1082 ipw_led_band_off(priv);
1083 cancel_delayed_work(&priv->led_link_on);
1084 cancel_delayed_work(&priv->led_link_off);
1085 cancel_delayed_work(&priv->led_act_off);
1089 * The following adds a new attribute to the sysfs representation
1090 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1091 * used for controlling the debug level.
1093 * See the level definitions in ipw for details.
1095 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1097 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1100 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1101 size_t count)
1103 char *p = (char *)buf;
1104 u32 val;
1106 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1107 p++;
1108 if (p[0] == 'x' || p[0] == 'X')
1109 p++;
1110 val = simple_strtoul(p, &p, 16);
1111 } else
1112 val = simple_strtoul(p, &p, 10);
1113 if (p == buf)
1114 printk(KERN_INFO DRV_NAME
1115 ": %s is not in hex or decimal form.\n", buf);
1116 else
1117 ipw_debug_level = val;
1119 return strnlen(buf, count);
1122 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1123 show_debug_level, store_debug_level);
1125 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1127 /* length = 1st dword in log */
1128 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1131 static void ipw_capture_event_log(struct ipw_priv *priv,
1132 u32 log_len, struct ipw_event *log)
1134 u32 base;
1136 if (log_len) {
1137 base = ipw_read32(priv, IPW_EVENT_LOG);
1138 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1139 (u8 *) log, sizeof(*log) * log_len);
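/* Capture the firmware error elements and the event log into a single
 * allocation; returns NULL if the allocation fails */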
1143 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1145 struct ipw_fw_error *error;
1146 u32 log_len = ipw_get_event_log_len(priv);
1147 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1148 u32 elem_len = ipw_read_reg32(priv, base);
1150 error = kmalloc(sizeof(*error) +
1151 sizeof(*error->elem) * elem_len +
1152 sizeof(*error->log) * log_len, GFP_ATOMIC);
1153 if (!error) {
1154 IPW_ERROR("Memory allocation for firmware error log "
1155 "failed.\n");
1156 return NULL;
1158 error->jiffies = jiffies;
1159 error->status = priv->status;
1160 error->config = priv->config;
1161 error->elem_len = elem_len;
1162 error->log_len = log_len;
1163 error->elem = (struct ipw_error_elem *)error->payload;
1164 error->log = (struct ipw_event *)(error->elem + elem_len);
1166 ipw_capture_event_log(priv, log_len, error->log);
1168 if (elem_len)
1169 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1170 sizeof(*error->elem) * elem_len);
1172 return error;
1175 static void ipw_free_error_log(struct ipw_fw_error *error)
1177 if (error)
1178 kfree(error);
1181 static ssize_t show_event_log(struct device *d,
1182 struct device_attribute *attr, char *buf)
1184 struct ipw_priv *priv = dev_get_drvdata(d);
1185 u32 log_len = ipw_get_event_log_len(priv);
1186 struct ipw_event log[log_len];
1187 u32 len = 0, i;
1189 ipw_capture_event_log(priv, log_len, log);
1191 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1192 for (i = 0; i < log_len; i++)
1193 len += snprintf(buf + len, PAGE_SIZE - len,
1194 "\n%08X%08X%08X",
1195 log[i].time, log[i].event, log[i].data);
1196 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1197 return len;
1200 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1202 static ssize_t show_error(struct device *d,
1203 struct device_attribute *attr, char *buf)
1205 struct ipw_priv *priv = dev_get_drvdata(d);
1206 u32 len = 0, i;
1207 if (!priv->error)
1208 return 0;
1209 len += snprintf(buf + len, PAGE_SIZE - len,
1210 "%08lX%08X%08X%08X",
1211 priv->error->jiffies,
1212 priv->error->status,
1213 priv->error->config, priv->error->elem_len);
1214 for (i = 0; i < priv->error->elem_len; i++)
1215 len += snprintf(buf + len, PAGE_SIZE - len,
1216 "\n%08X%08X%08X%08X%08X%08X%08X",
1217 priv->error->elem[i].time,
1218 priv->error->elem[i].desc,
1219 priv->error->elem[i].blink1,
1220 priv->error->elem[i].blink2,
1221 priv->error->elem[i].link1,
1222 priv->error->elem[i].link2,
1223 priv->error->elem[i].data);
1225 len += snprintf(buf + len, PAGE_SIZE - len,
1226 "\n%08X", priv->error->log_len);
1227 for (i = 0; i < priv->error->log_len; i++)
1228 len += snprintf(buf + len, PAGE_SIZE - len,
1229 "\n%08X%08X%08X",
1230 priv->error->log[i].time,
1231 priv->error->log[i].event,
1232 priv->error->log[i].data);
1233 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1234 return len;
1237 static ssize_t clear_error(struct device *d,
1238 struct device_attribute *attr,
1239 const char *buf, size_t count)
1241 struct ipw_priv *priv = dev_get_drvdata(d);
1242 if (priv->error) {
1243 ipw_free_error_log(priv->error);
1244 priv->error = NULL;
1246 return count;
1249 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1251 static ssize_t show_cmd_log(struct device *d,
1252 struct device_attribute *attr, char *buf)
1254 struct ipw_priv *priv = dev_get_drvdata(d);
1255 u32 len = 0, i;
1256 if (!priv->cmdlog)
1257 return 0;
1258 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1259 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1260 i = (i + 1) % priv->cmdlog_len) {
1261 len +=
1262 snprintf(buf + len, PAGE_SIZE - len,
1263 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1264 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1265 priv->cmdlog[i].cmd.len);
1266 len +=
1267 snprintk_buf(buf + len, PAGE_SIZE - len,
1268 (u8 *) priv->cmdlog[i].cmd.param,
1269 priv->cmdlog[i].cmd.len);
1270 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1272 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1273 return len;
1276 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1278 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1279 char *buf)
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1285 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1286 const char *buf, size_t count)
1288 struct ipw_priv *priv = dev_get_drvdata(d);
1289 #ifdef CONFIG_IPW2200_DEBUG
1290 struct net_device *dev = priv->net_dev;
1291 #endif
1292 char buffer[] = "00000000";
1293 unsigned long len =
1294 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1295 unsigned long val;
1296 char *p = buffer;
1298 IPW_DEBUG_INFO("enter\n");
1300 strncpy(buffer, buf, len);
1301 buffer[len] = 0;
1303 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1304 p++;
1305 if (p[0] == 'x' || p[0] == 'X')
1306 p++;
1307 val = simple_strtoul(p, &p, 16);
1308 } else
1309 val = simple_strtoul(p, &p, 10);
1310 if (p == buffer) {
1311 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1312 } else {
1313 priv->ieee->scan_age = val;
1314 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1317 IPW_DEBUG_INFO("exit\n");
1318 return len;
1321 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1323 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1324 char *buf)
1326 struct ipw_priv *priv = dev_get_drvdata(d);
1327 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1330 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1331 const char *buf, size_t count)
1333 struct ipw_priv *priv = dev_get_drvdata(d);
1335 IPW_DEBUG_INFO("enter\n");
1337 if (count == 0)
1338 return 0;
1340 if (*buf == 0) {
1341 IPW_DEBUG_LED("Disabling LED control.\n");
1342 priv->config |= CFG_NO_LED;
1343 ipw_led_shutdown(priv);
1344 } else {
1345 IPW_DEBUG_LED("Enabling LED control.\n");
1346 priv->config &= ~CFG_NO_LED;
1347 ipw_led_init(priv);
1350 IPW_DEBUG_INFO("exit\n");
1351 return count;
1354 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1356 static ssize_t show_status(struct device *d,
1357 struct device_attribute *attr, char *buf)
1359 struct ipw_priv *p = d->driver_data;
1360 return sprintf(buf, "0x%08x\n", (int)p->status);
1363 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1365 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1366 char *buf)
1368 struct ipw_priv *p = d->driver_data;
1369 return sprintf(buf, "0x%08x\n", (int)p->config);
1372 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1374 static ssize_t show_nic_type(struct device *d,
1375 struct device_attribute *attr, char *buf)
1377 struct ipw_priv *priv = d->driver_data;
1378 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1381 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1383 static ssize_t show_ucode_version(struct device *d,
1384 struct device_attribute *attr, char *buf)
1386 u32 len = sizeof(u32), tmp = 0;
1387 struct ipw_priv *p = d->driver_data;
1389 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1390 return 0;
1392 return sprintf(buf, "0x%08x\n", tmp);
1395 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1397 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1398 char *buf)
1400 u32 len = sizeof(u32), tmp = 0;
1401 struct ipw_priv *p = d->driver_data;
1403 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1404 return 0;
1406 return sprintf(buf, "0x%08x\n", tmp);
1409 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1412 * Add a device attribute to view/control the delay between eeprom
1413 * operations.
1415 static ssize_t show_eeprom_delay(struct device *d,
1416 struct device_attribute *attr, char *buf)
1418 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1419 return sprintf(buf, "%i\n", n);
1421 static ssize_t store_eeprom_delay(struct device *d,
1422 struct device_attribute *attr,
1423 const char *buf, size_t count)
1425 struct ipw_priv *p = d->driver_data;
1426 sscanf(buf, "%i", &p->eeprom_delay);
1427 return strnlen(buf, count);
1430 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1431 show_eeprom_delay, store_eeprom_delay);
1433 static ssize_t show_command_event_reg(struct device *d,
1434 struct device_attribute *attr, char *buf)
1436 u32 reg = 0;
1437 struct ipw_priv *p = d->driver_data;
1439 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1440 return sprintf(buf, "0x%08x\n", reg);
1442 static ssize_t store_command_event_reg(struct device *d,
1443 struct device_attribute *attr,
1444 const char *buf, size_t count)
1446 u32 reg;
1447 struct ipw_priv *p = d->driver_data;
1449 sscanf(buf, "%x", &reg);
1450 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1451 return strnlen(buf, count);
1454 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1455 show_command_event_reg, store_command_event_reg);
1457 static ssize_t show_mem_gpio_reg(struct device *d,
1458 struct device_attribute *attr, char *buf)
1460 u32 reg = 0;
1461 struct ipw_priv *p = d->driver_data;
1463 reg = ipw_read_reg32(p, 0x301100);
1464 return sprintf(buf, "0x%08x\n", reg);
1466 static ssize_t store_mem_gpio_reg(struct device *d,
1467 struct device_attribute *attr,
1468 const char *buf, size_t count)
1470 u32 reg;
1471 struct ipw_priv *p = d->driver_data;
1473 sscanf(buf, "%x", &reg);
1474 ipw_write_reg32(p, 0x301100, reg);
1475 return strnlen(buf, count);
1478 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1479 show_mem_gpio_reg, store_mem_gpio_reg);
1481 static ssize_t show_indirect_dword(struct device *d,
1482 struct device_attribute *attr, char *buf)
1484 u32 reg = 0;
1485 struct ipw_priv *priv = d->driver_data;
1487 if (priv->status & STATUS_INDIRECT_DWORD)
1488 reg = ipw_read_reg32(priv, priv->indirect_dword);
1489 else
1490 reg = 0;
1492 return sprintf(buf, "0x%08x\n", reg);
1494 static ssize_t store_indirect_dword(struct device *d,
1495 struct device_attribute *attr,
1496 const char *buf, size_t count)
1498 struct ipw_priv *priv = d->driver_data;
1500 sscanf(buf, "%x", &priv->indirect_dword);
1501 priv->status |= STATUS_INDIRECT_DWORD;
1502 return strnlen(buf, count);
1505 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1506 show_indirect_dword, store_indirect_dword);
1508 static ssize_t show_indirect_byte(struct device *d,
1509 struct device_attribute *attr, char *buf)
1511 u8 reg = 0;
1512 struct ipw_priv *priv = d->driver_data;
1514 if (priv->status & STATUS_INDIRECT_BYTE)
1515 reg = ipw_read_reg8(priv, priv->indirect_byte);
1516 else
1517 reg = 0;
1519 return sprintf(buf, "0x%02x\n", reg);
1521 static ssize_t store_indirect_byte(struct device *d,
1522 struct device_attribute *attr,
1523 const char *buf, size_t count)
1525 struct ipw_priv *priv = d->driver_data;
1527 sscanf(buf, "%x", &priv->indirect_byte);
1528 priv->status |= STATUS_INDIRECT_BYTE;
1529 return strnlen(buf, count);
1532 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1533 show_indirect_byte, store_indirect_byte);
1535 static ssize_t show_direct_dword(struct device *d,
1536 struct device_attribute *attr, char *buf)
1538 u32 reg = 0;
1539 struct ipw_priv *priv = d->driver_data;
1541 if (priv->status & STATUS_DIRECT_DWORD)
1542 reg = ipw_read32(priv, priv->direct_dword);
1543 else
1544 reg = 0;
1546 return sprintf(buf, "0x%08x\n", reg);
1548 static ssize_t store_direct_dword(struct device *d,
1549 struct device_attribute *attr,
1550 const char *buf, size_t count)
1552 struct ipw_priv *priv = d->driver_data;
1554 sscanf(buf, "%x", &priv->direct_dword);
1555 priv->status |= STATUS_DIRECT_DWORD;
1556 return strnlen(buf, count);
1559 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1560 show_direct_dword, store_direct_dword);
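/* Poll the hardware RF kill switch, update STATUS_RF_KILL_HW to match and
 * return 1 if the switch is active */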
1562 static int rf_kill_active(struct ipw_priv *priv)
1564 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1565 priv->status |= STATUS_RF_KILL_HW;
1566 else
1567 priv->status &= ~STATUS_RF_KILL_HW;
1569 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1572 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1573 char *buf)
1575 /* 0 - RF kill not enabled
1576 1 - SW based RF kill active (sysfs)
1577 2 - HW based RF kill active
1578 3 - Both HW and SW based RF kill active */
1579 struct ipw_priv *priv = d->driver_data;
1580 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1581 (rf_kill_active(priv) ? 0x2 : 0x0);
1582 return sprintf(buf, "%i\n", val);
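/* Set or clear the software RF kill state.  Disabling the radio queues the
 * 'down' work; enabling it queues 'up' unless the HW switch still blocks
 * the radio, in which case the rf_kill poll is rescheduled instead */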
1585 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1587 if ((disable_radio ? 1 : 0) ==
1588 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1589 return 0;
1591 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1592 disable_radio ? "OFF" : "ON");
1594 if (disable_radio) {
1595 priv->status |= STATUS_RF_KILL_SW;
1597 if (priv->workqueue)
1598 cancel_delayed_work(&priv->request_scan);
1599 queue_work(priv->workqueue, &priv->down);
1600 } else {
1601 priv->status &= ~STATUS_RF_KILL_SW;
1602 if (rf_kill_active(priv)) {
1603 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1604 "disabled by HW switch\n");
1605 /* Make sure the RF_KILL check timer is running */
1606 cancel_delayed_work(&priv->rf_kill);
1607 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1608 2 * HZ);
1609 } else
1610 queue_work(priv->workqueue, &priv->up);
1613 return 1;
1616 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1617 const char *buf, size_t count)
1619 struct ipw_priv *priv = d->driver_data;
1621 ipw_radio_kill_sw(priv, buf[0] == '1');
1623 return count;
1626 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1628 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1629 char *buf)
1631 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1632 int pos = 0, len = 0;
1633 if (priv->config & CFG_SPEED_SCAN) {
1634 while (priv->speed_scan[pos] != 0)
1635 len += sprintf(&buf[len], "%d ",
1636 priv->speed_scan[pos++]);
1637 return len + sprintf(&buf[len], "\n");
1640 return sprintf(buf, "0\n");
1643 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1644 const char *buf, size_t count)
1646 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1647 int channel, pos = 0;
1648 const char *p = buf;
1650 /* list of space separated channels to scan, optionally ending with 0 */
1651 while ((channel = simple_strtol(p, NULL, 0))) {
1652 if (pos == MAX_SPEED_SCAN - 1) {
1653 priv->speed_scan[pos] = 0;
1654 break;
1657 if (ipw_is_valid_channel(priv->ieee, channel))
1658 priv->speed_scan[pos++] = channel;
1659 else
1660 IPW_WARNING("Skipping invalid channel request: %d\n",
1661 channel);
1662 p = strchr(p, ' ');
1663 if (!p)
1664 break;
1665 while (*p == ' ' || *p == '\t')
1666 p++;
1669 if (pos == 0)
1670 priv->config &= ~CFG_SPEED_SCAN;
1671 else {
1672 priv->speed_scan_pos = 0;
1673 priv->config |= CFG_SPEED_SCAN;
1676 return count;
1679 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1680 store_speed_scan);
1682 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1683 char *buf)
1685 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1686 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1689 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1690 const char *buf, size_t count)
1692 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1693 if (buf[0] == '1')
1694 priv->config |= CFG_NET_STATS;
1695 else
1696 priv->config &= ~CFG_NET_STATS;
1698 return count;
1701 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1702 show_net_stats, store_net_stats);
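/* Send an SIOCGIWAP wireless event carrying the current BSSID, or a zeroed
 * address if we are no longer associated */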
1704 static void notify_wx_assoc_event(struct ipw_priv *priv)
1706 union iwreq_data wrqu;
1707 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1708 if (priv->status & STATUS_ASSOCIATED)
1709 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1710 else
1711 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1712 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
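/* Interrupt bottom half: service each pending INTA cause (RX, TX queue
 * reclaim, RF kill, firmware errors, ...) and re-enable interrupts */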
1715 static void ipw_irq_tasklet(struct ipw_priv *priv)
1717 u32 inta, inta_mask, handled = 0;
1718 unsigned long flags;
1719 int rc = 0;
1721 spin_lock_irqsave(&priv->lock, flags);
1723 inta = ipw_read32(priv, IPW_INTA_RW);
1724 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1725 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1727 /* Add any cached INTA values that need to be handled */
1728 inta |= priv->isr_inta;
1730 /* handle each of the interrupt's possible causes */
1731 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1732 ipw_rx(priv);
1733 handled |= IPW_INTA_BIT_RX_TRANSFER;
1736 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1737 IPW_DEBUG_HC("Command completed.\n");
1738 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1739 priv->status &= ~STATUS_HCMD_ACTIVE;
1740 wake_up_interruptible(&priv->wait_command_queue);
1741 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1744 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1745 IPW_DEBUG_TX("TX_QUEUE_1\n");
1746 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1747 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1750 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1751 IPW_DEBUG_TX("TX_QUEUE_2\n");
1752 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1753 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1756 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1757 IPW_DEBUG_TX("TX_QUEUE_3\n");
1758 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1759 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1762 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1763 IPW_DEBUG_TX("TX_QUEUE_4\n");
1764 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1765 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1768 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1769 IPW_WARNING("STATUS_CHANGE\n");
1770 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1773 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1774 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1775 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1778 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1779 IPW_WARNING("HOST_CMD_DONE\n");
1780 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1783 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1784 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1785 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1788 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1789 IPW_WARNING("PHY_OFF_DONE\n");
1790 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1793 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1794 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1795 priv->status |= STATUS_RF_KILL_HW;
1796 wake_up_interruptible(&priv->wait_command_queue);
1797 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1798 cancel_delayed_work(&priv->request_scan);
1799 schedule_work(&priv->link_down);
1800 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1801 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1804 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1805 IPW_ERROR("Firmware error detected. Restarting.\n");
1806 if (priv->error) {
1807 IPW_ERROR("Sysfs 'error' log already exists.\n");
1808 #ifdef CONFIG_IPW2200_DEBUG
1809 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1810 struct ipw_fw_error *error =
1811 ipw_alloc_error_log(priv);
1812 ipw_dump_error_log(priv, error);
1813 if (error)
1814 ipw_free_error_log(error);
1816 #endif
1817 } else {
1818 priv->error = ipw_alloc_error_log(priv);
1819 if (priv->error)
1820 IPW_ERROR("Sysfs 'error' log captured.\n");
1821 else
1822 IPW_ERROR("Error allocating sysfs 'error' "
1823 "log.\n");
1824 #ifdef CONFIG_IPW2200_DEBUG
1825 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1826 ipw_dump_error_log(priv, priv->error);
1827 #endif
1830 /* XXX: If hardware encryption is for WPA/WPA2,
1831 * we have to notify the supplicant. */
1832 if (priv->ieee->sec.encrypt) {
1833 priv->status &= ~STATUS_ASSOCIATED;
1834 notify_wx_assoc_event(priv);
1837 /* Keep the restart process from trying to send host
1838 * commands by clearing the INIT status bit */
1839 priv->status &= ~STATUS_INIT;
1841 /* Cancel currently queued command. */
1842 priv->status &= ~STATUS_HCMD_ACTIVE;
1843 wake_up_interruptible(&priv->wait_command_queue);
1845 queue_work(priv->workqueue, &priv->adapter_restart);
1846 handled |= IPW_INTA_BIT_FATAL_ERROR;
1849 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1850 IPW_ERROR("Parity error\n");
1851 handled |= IPW_INTA_BIT_PARITY_ERROR;
1854 if (handled != inta) {
1855 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1858 /* enable all interrupts */
1859 ipw_enable_interrupts(priv);
1861 spin_unlock_irqrestore(&priv->lock, flags);
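/* Map a host command ID to its name for debug logging */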
1864 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1865 static char *get_cmd_string(u8 cmd)
1867 switch (cmd) {
1868 IPW_CMD(HOST_COMPLETE);
1869 IPW_CMD(POWER_DOWN);
1870 IPW_CMD(SYSTEM_CONFIG);
1871 IPW_CMD(MULTICAST_ADDRESS);
1872 IPW_CMD(SSID);
1873 IPW_CMD(ADAPTER_ADDRESS);
1874 IPW_CMD(PORT_TYPE);
1875 IPW_CMD(RTS_THRESHOLD);
1876 IPW_CMD(FRAG_THRESHOLD);
1877 IPW_CMD(POWER_MODE);
1878 IPW_CMD(WEP_KEY);
1879 IPW_CMD(TGI_TX_KEY);
1880 IPW_CMD(SCAN_REQUEST);
1881 IPW_CMD(SCAN_REQUEST_EXT);
1882 IPW_CMD(ASSOCIATE);
1883 IPW_CMD(SUPPORTED_RATES);
1884 IPW_CMD(SCAN_ABORT);
1885 IPW_CMD(TX_FLUSH);
1886 IPW_CMD(QOS_PARAMETERS);
1887 IPW_CMD(DINO_CONFIG);
1888 IPW_CMD(RSN_CAPABILITIES);
1889 IPW_CMD(RX_KEY);
1890 IPW_CMD(CARD_DISABLE);
1891 IPW_CMD(SEED_NUMBER);
1892 IPW_CMD(TX_POWER);
1893 IPW_CMD(COUNTRY_INFO);
1894 IPW_CMD(AIRONET_INFO);
1895 IPW_CMD(AP_TX_POWER);
1896 IPW_CMD(CCKM_INFO);
1897 IPW_CMD(CCX_VER_INFO);
1898 IPW_CMD(SET_CALIBRATION);
1899 IPW_CMD(SENSITIVITY_CALIB);
1900 IPW_CMD(RETRY_LIMIT);
1901 IPW_CMD(IPW_PRE_POWER_DOWN);
1902 IPW_CMD(VAP_BEACON_TEMPLATE);
1903 IPW_CMD(VAP_DTIM_PERIOD);
1904 IPW_CMD(EXT_SUPPORTED_RATES);
1905 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1906 IPW_CMD(VAP_QUIET_INTERVALS);
1907 IPW_CMD(VAP_CHANNEL_SWITCH);
1908 IPW_CMD(VAP_MANDATORY_CHANNELS);
1909 IPW_CMD(VAP_CELL_PWR_LIMIT);
1910 IPW_CMD(VAP_CF_PARAM_SET);
1911 IPW_CMD(VAP_SET_BEACONING_STATE);
1912 IPW_CMD(MEASUREMENT);
1913 IPW_CMD(POWER_CAPABILITY);
1914 IPW_CMD(SUPPORTED_CHANNELS);
1915 IPW_CMD(TPC_REPORT);
1916 IPW_CMD(WME_INFO);
1917 IPW_CMD(PRODUCTION_COMMAND);
1918 default:
1919 return "UNKNOWN";
1923 #define HOST_COMPLETE_TIMEOUT HZ
1925 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1927 int rc = 0;
1928 unsigned long flags;
1930 spin_lock_irqsave(&priv->lock, flags);
1931 if (priv->status & STATUS_HCMD_ACTIVE) {
1932 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1933 get_cmd_string(cmd->cmd));
1934 spin_unlock_irqrestore(&priv->lock, flags);
1935 return -EAGAIN;
1938 priv->status |= STATUS_HCMD_ACTIVE;
1940 if (priv->cmdlog) {
1941 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1942 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1943 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1944 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1945 cmd->len);
1946 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1949 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1950 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1951 priv->status);
1953 #ifndef DEBUG_CMD_WEP_KEY
1954 if (cmd->cmd == IPW_CMD_WEP_KEY)
1955                 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
1956 else
1957 #endif
1958 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1960 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
1961 if (rc) {
1962 priv->status &= ~STATUS_HCMD_ACTIVE;
1963 IPW_ERROR("Failed to send %s: Reason %d\n",
1964 get_cmd_string(cmd->cmd), rc);
1965 spin_unlock_irqrestore(&priv->lock, flags);
1966 goto exit;
1968 spin_unlock_irqrestore(&priv->lock, flags);
1970 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1971 !(priv->
1972 status & STATUS_HCMD_ACTIVE),
1973 HOST_COMPLETE_TIMEOUT);
1974 if (rc == 0) {
1975 spin_lock_irqsave(&priv->lock, flags);
1976 if (priv->status & STATUS_HCMD_ACTIVE) {
1977 IPW_ERROR("Failed to send %s: Command timed out.\n",
1978 get_cmd_string(cmd->cmd));
1979 priv->status &= ~STATUS_HCMD_ACTIVE;
1980 spin_unlock_irqrestore(&priv->lock, flags);
1981 rc = -EIO;
1982 goto exit;
1984 spin_unlock_irqrestore(&priv->lock, flags);
1985 } else
1986 rc = 0;
1988 if (priv->status & STATUS_RF_KILL_HW) {
1989 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1990 get_cmd_string(cmd->cmd));
1991 rc = -EIO;
1992 goto exit;
1995 exit:
1996 if (priv->cmdlog) {
1997 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
1998 priv->cmdlog_pos %= priv->cmdlog_len;
2000 return rc;
2003 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2005 struct host_cmd cmd = {
2006 .cmd = command,
2009 return __ipw_send_cmd(priv, &cmd);
2012 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2013 void *data)
2015 struct host_cmd cmd = {
2016 .cmd = command,
2017 .len = len,
2018 .param = data,
2021 return __ipw_send_cmd(priv, &cmd);
2024 static int ipw_send_host_complete(struct ipw_priv *priv)
2026 if (!priv) {
2027 IPW_ERROR("Invalid args\n");
2028 return -1;
2031 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2034 static int ipw_send_system_config(struct ipw_priv *priv,
2035 struct ipw_sys_config *config)
2037 if (!priv || !config) {
2038 IPW_ERROR("Invalid args\n");
2039 return -1;
2042 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
2043 config);
2046 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2048 if (!priv || !ssid) {
2049 IPW_ERROR("Invalid args\n");
2050 return -1;
2053 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2054 ssid);
2057 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2059 if (!priv || !mac) {
2060 IPW_ERROR("Invalid args\n");
2061 return -1;
2064 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2065 priv->net_dev->name, MAC_ARG(mac));
2067 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2071 * NOTE: This must be executed from our workqueue as it results in udelay
2072 * being called which may corrupt the keyboard if executed on default
2073 * workqueue
2075 static void ipw_adapter_restart(void *adapter)
2077 struct ipw_priv *priv = adapter;
2079 if (priv->status & STATUS_RF_KILL_MASK)
2080 return;
2082 ipw_down(priv);
2084 if (priv->assoc_network &&
2085 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2086 ipw_remove_current_network(priv);
2088 if (ipw_up(priv)) {
2089 IPW_ERROR("Failed to up device\n");
2090 return;
2094 static void ipw_bg_adapter_restart(void *data)
2096 struct ipw_priv *priv = data;
2097 mutex_lock(&priv->mutex);
2098 ipw_adapter_restart(data);
2099 mutex_unlock(&priv->mutex);
2102 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2104 static void ipw_scan_check(void *data)
2106 struct ipw_priv *priv = data;
2107 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2108 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2109 "adapter after (%dms).\n",
2110 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2111 queue_work(priv->workqueue, &priv->adapter_restart);
2115 static void ipw_bg_scan_check(void *data)
2117 struct ipw_priv *priv = data;
2118 mutex_lock(&priv->mutex);
2119 ipw_scan_check(data);
2120 mutex_unlock(&priv->mutex);
2123 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2124 struct ipw_scan_request_ext *request)
2126 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2127 sizeof(*request), request);
2130 static int ipw_send_scan_abort(struct ipw_priv *priv)
2132 if (!priv) {
2133 IPW_ERROR("Invalid args\n");
2134 return -1;
2137 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2140 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2142 struct ipw_sensitivity_calib calib = {
2143 .beacon_rssi_raw = sens,
2146 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2147 &calib);
2150 static int ipw_send_associate(struct ipw_priv *priv,
2151 struct ipw_associate *associate)
2153 struct ipw_associate tmp_associate;
2155 if (!priv || !associate) {
2156 IPW_ERROR("Invalid args\n");
2157 return -1;
2160 memcpy(&tmp_associate, associate, sizeof(*associate));
2161 tmp_associate.policy_support =
2162 cpu_to_le16(tmp_associate.policy_support);
2163 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2164 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2165 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2166 tmp_associate.listen_interval =
2167 cpu_to_le16(tmp_associate.listen_interval);
2168 tmp_associate.beacon_interval =
2169 cpu_to_le16(tmp_associate.beacon_interval);
2170 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2172 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2173 &tmp_associate);
2176 static int ipw_send_supported_rates(struct ipw_priv *priv,
2177 struct ipw_supported_rates *rates)
2179 if (!priv || !rates) {
2180 IPW_ERROR("Invalid args\n");
2181 return -1;
2184 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2185 rates);
2188 static int ipw_set_random_seed(struct ipw_priv *priv)
2190 u32 val;
2192 if (!priv) {
2193 IPW_ERROR("Invalid args\n");
2194 return -1;
2197 get_random_bytes(&val, sizeof(val));
2199 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2202 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2204 if (!priv) {
2205 IPW_ERROR("Invalid args\n");
2206 return -1;
2209 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2210 &phy_off);
2213 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2215 if (!priv || !power) {
2216 IPW_ERROR("Invalid args\n");
2217 return -1;
2220 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2223 static int ipw_set_tx_power(struct ipw_priv *priv)
2225 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
2226 struct ipw_tx_power tx_power;
2227 s8 max_power;
2228 int i;
2230 memset(&tx_power, 0, sizeof(tx_power));
2232 /* configure device for 'G' band */
2233 tx_power.ieee_mode = IPW_G_MODE;
2234 tx_power.num_channels = geo->bg_channels;
2235 for (i = 0; i < geo->bg_channels; i++) {
2236 max_power = geo->bg[i].max_power;
2237 tx_power.channels_tx_power[i].channel_number =
2238 geo->bg[i].channel;
2239 tx_power.channels_tx_power[i].tx_power = max_power ?
2240 min(max_power, priv->tx_power) : priv->tx_power;
2242 if (ipw_send_tx_power(priv, &tx_power))
2243 return -EIO;
2245 /* configure device to also handle 'B' band */
2246 tx_power.ieee_mode = IPW_B_MODE;
2247 if (ipw_send_tx_power(priv, &tx_power))
2248 return -EIO;
2250 /* configure device to also handle 'A' band */
2251 if (priv->ieee->abg_true) {
2252 tx_power.ieee_mode = IPW_A_MODE;
2253 tx_power.num_channels = geo->a_channels;
2254 for (i = 0; i < tx_power.num_channels; i++) {
2255 max_power = geo->a[i].max_power;
2256 tx_power.channels_tx_power[i].channel_number =
2257 geo->a[i].channel;
2258 tx_power.channels_tx_power[i].tx_power = max_power ?
2259 min(max_power, priv->tx_power) : priv->tx_power;
2261 if (ipw_send_tx_power(priv, &tx_power))
2262 return -EIO;
2264 return 0;
2267 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2269 struct ipw_rts_threshold rts_threshold = {
2270 .rts_threshold = rts,
2273 if (!priv) {
2274 IPW_ERROR("Invalid args\n");
2275 return -1;
2278 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2279 sizeof(rts_threshold), &rts_threshold);
2282 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2284 struct ipw_frag_threshold frag_threshold = {
2285 .frag_threshold = frag,
2288 if (!priv) {
2289 IPW_ERROR("Invalid args\n");
2290 return -1;
2293 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2294 sizeof(frag_threshold), &frag_threshold);
2297 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2299 u32 param;
2301 if (!priv) {
2302 IPW_ERROR("Invalid args\n");
2303 return -1;
2306         /* If on battery, set to 3; if on AC, set to CAM; otherwise, use the
2307          * user-supplied level */
2308 switch (mode) {
2309 case IPW_POWER_BATTERY:
2310 param = IPW_POWER_INDEX_3;
2311 break;
2312 case IPW_POWER_AC:
2313 param = IPW_POWER_MODE_CAM;
2314 break;
2315 default:
2316 param = mode;
2317 break;
2320 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2321 &param);
2324 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2326 struct ipw_retry_limit retry_limit = {
2327 .short_retry_limit = slimit,
2328 .long_retry_limit = llimit
2331 if (!priv) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2336 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2337 &retry_limit);
2341 * The IPW device contains a Microwire compatible EEPROM that stores
2342 * various data like the MAC address. Usually the firmware has exclusive
2343 * access to the eeprom, but during device initialization (before the
2344 * device driver has sent the HostComplete command to the firmware) the
2345 * device driver has read access to the EEPROM by way of indirect addressing
2346 * through a couple of memory mapped registers.
2348 * The following is a simplified implementation for pulling data out of the
2349  * eeprom, along with some helper functions to find information in
2350 * the per device private data's copy of the eeprom.
2352  * NOTE: To better understand how these functions work (i.e. what is a chip
2353  *       select and why we have to keep driving the eeprom clock?), read
2354  *       just about any data sheet for a Microwire compatible EEPROM.
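/*
 * As an illustration of what the helpers below actually put on the wire
 * (assuming EEPROM_CMD_READ is the standard Microwire READ opcode, binary
 * 10), a single 16-bit read of word address A looks like:
 *
 *   1. assert CS (eeprom_cs), clocking SK once with CS held high
 *   2. shift out the start bit '1', then the two opcode bits '1','0'
 *   3. shift out the 8 address bits of A, MSB first (eeprom_op)
 *   4. clock one dummy bit, then clock SK 16 more times, sampling DO after
 *      each pulse to assemble the data word, MSB first
 *   5. drop CS (eeprom_disable_cs)
 *
 * eeprom_read_u16() below performs steps 1-5; ipw_eeprom_init_sram()
 * repeats it for all 128 words of the EEPROM image.
 */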
2357 /* write a 32 bit value into the indirect accessor register */
2358 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2360 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2362 /* the eeprom requires some time to complete the operation */
2363 udelay(p->eeprom_delay);
2365 return;
2368 /* perform a chip select operation */
2369 static void eeprom_cs(struct ipw_priv *priv)
2371 eeprom_write_reg(priv, 0);
2372 eeprom_write_reg(priv, EEPROM_BIT_CS);
2373 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2374 eeprom_write_reg(priv, EEPROM_BIT_CS);
2377 /* release the chip select */
2378 static void eeprom_disable_cs(struct ipw_priv *priv)
2380 eeprom_write_reg(priv, EEPROM_BIT_CS);
2381 eeprom_write_reg(priv, 0);
2382 eeprom_write_reg(priv, EEPROM_BIT_SK);
2385 /* push a single bit down to the eeprom */
2386 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2388 int d = (bit ? EEPROM_BIT_DI : 0);
2389 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2390 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2393 /* push an opcode followed by an address down to the eeprom */
2394 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2396 int i;
2398 eeprom_cs(priv);
2399 eeprom_write_bit(priv, 1);
2400 eeprom_write_bit(priv, op & 2);
2401 eeprom_write_bit(priv, op & 1);
2402 for (i = 7; i >= 0; i--) {
2403 eeprom_write_bit(priv, addr & (1 << i));
2407 /* pull 16 bits off the eeprom, one bit at a time */
2408 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2410 int i;
2411 u16 r = 0;
2413 /* Send READ Opcode */
2414 eeprom_op(priv, EEPROM_CMD_READ, addr);
2416 /* Send dummy bit */
2417 eeprom_write_reg(priv, EEPROM_BIT_CS);
2419 /* Read the byte off the eeprom one bit at a time */
2420 for (i = 0; i < 16; i++) {
2421 u32 data = 0;
2422 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2423 eeprom_write_reg(priv, EEPROM_BIT_CS);
2424 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2425 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2428 /* Send another dummy bit */
2429 eeprom_write_reg(priv, 0);
2430 eeprom_disable_cs(priv);
2432 return r;
2435 /* helper function for pulling the mac address out of the private */
2436 /* data's copy of the eeprom data */
2437 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2439 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2443 * Either the device driver (i.e. the host) or the firmware can
2444 * load eeprom data into the designated region in SRAM. If neither
2445 * happens then the FW will shutdown with a fatal error.
2447  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2448  * region of shared SRAM needs to be non-zero.
2450 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2452 int i;
2453 u16 *eeprom = (u16 *) priv->eeprom;
2455 IPW_DEBUG_TRACE(">>\n");
2457 /* read entire contents of eeprom into private buffer */
2458 for (i = 0; i < 128; i++)
2459 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2462            If the data looks correct, then write it to the device's
2463            SRAM. Otherwise, let the firmware know to perform the operation
2464            on its own.
2466 if (priv->eeprom[EEPROM_VERSION] != 0) {
2467 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2469 /* write the eeprom data to sram */
2470 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2471 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2473 /* Do not load eeprom data on fatal error or suspend */
2474 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2475 } else {
2476                 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2478 /* Load eeprom data on fatal error or suspend */
2479 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2482 IPW_DEBUG_TRACE("<<\n");
2485 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2487 count >>= 2;
2488 if (!count)
2489 return;
2490 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2491 while (count--)
2492 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2495 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2497 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2498 CB_NUMBER_OF_ELEMENTS_SMALL *
2499 sizeof(struct command_block));
2502 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2503 { /* start dma engine but no transfers yet */
2505 IPW_DEBUG_FW(">> : \n");
2507 /* Start the dma */
2508 ipw_fw_dma_reset_command_blocks(priv);
2510 /* Write CB base address */
2511 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2513 IPW_DEBUG_FW("<< : \n");
2514 return 0;
2517 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2519 u32 control = 0;
2521 IPW_DEBUG_FW(">> :\n");
2523         /* set the Stop and Abort bit */
2524 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2525 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2526 priv->sram_desc.last_cb_index = 0;
2528 IPW_DEBUG_FW("<< \n");
2531 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2532 struct command_block *cb)
2534 u32 address =
2535 IPW_SHARED_SRAM_DMA_CONTROL +
2536 (sizeof(struct command_block) * index);
2537 IPW_DEBUG_FW(">> :\n");
2539 ipw_write_indirect(priv, address, (u8 *) cb,
2540 (int)sizeof(struct command_block));
2542 IPW_DEBUG_FW("<< :\n");
2543 return 0;
2547 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2549 u32 control = 0;
2550 u32 index = 0;
2552 IPW_DEBUG_FW(">> :\n");
2554 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2555 ipw_fw_dma_write_command_block(priv, index,
2556 &priv->sram_desc.cb_list[index]);
2558 /* Enable the DMA in the CSR register */
2559 ipw_clear_bit(priv, IPW_RESET_REG,
2560 IPW_RESET_REG_MASTER_DISABLED |
2561 IPW_RESET_REG_STOP_MASTER);
2563 /* Set the Start bit. */
2564 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2565 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2567 IPW_DEBUG_FW("<< :\n");
2568 return 0;
2571 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2573 u32 address;
2574 u32 register_value = 0;
2575 u32 cb_fields_address = 0;
2577 IPW_DEBUG_FW(">> :\n");
2578 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2579 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2581         /* Read the DMA Control register */
2582 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2583 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2585 /* Print the CB values */
2586 cb_fields_address = address;
2587 register_value = ipw_read_reg32(priv, cb_fields_address);
2588 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2590 cb_fields_address += sizeof(u32);
2591 register_value = ipw_read_reg32(priv, cb_fields_address);
2592 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2594 cb_fields_address += sizeof(u32);
2595 register_value = ipw_read_reg32(priv, cb_fields_address);
2596 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2597 register_value);
2599 cb_fields_address += sizeof(u32);
2600 register_value = ipw_read_reg32(priv, cb_fields_address);
2601 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2603         IPW_DEBUG_FW("<< :\n");
2606 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2608 u32 current_cb_address = 0;
2609 u32 current_cb_index = 0;
2611         IPW_DEBUG_FW(">> :\n");
2612 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2614 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2615 sizeof(struct command_block);
2617 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2618 current_cb_index, current_cb_address);
2620         IPW_DEBUG_FW("<< :\n");
2621 return current_cb_index;
2625 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2626 u32 src_address,
2627 u32 dest_address,
2628 u32 length,
2629 int interrupt_enabled, int is_last)
2632 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2633 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2634 CB_DEST_SIZE_LONG;
2635 struct command_block *cb;
2636 u32 last_cb_element = 0;
2638 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2639 src_address, dest_address, length);
2641 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2642 return -1;
2644 last_cb_element = priv->sram_desc.last_cb_index;
2645 cb = &priv->sram_desc.cb_list[last_cb_element];
2646 priv->sram_desc.last_cb_index++;
2648 /* Calculate the new CB control word */
2649 if (interrupt_enabled)
2650 control |= CB_INT_ENABLED;
2652 if (is_last)
2653 control |= CB_LAST_VALID;
2655 control |= length;
2657 /* Calculate the CB Element's checksum value */
2658 cb->status = control ^ src_address ^ dest_address;
2660 /* Copy the Source and Destination addresses */
2661 cb->dest_addr = dest_address;
2662 cb->source_addr = src_address;
2664 /* Copy the Control Word last */
2665 cb->control = control;
2667 return 0;
2670 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2671 u32 src_phys, u32 dest_address, u32 length)
2673 u32 bytes_left = length;
2674 u32 src_offset = 0;
2675 u32 dest_offset = 0;
2676 int status = 0;
2677 IPW_DEBUG_FW(">> \n");
2678 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2679 src_phys, dest_address, length);
2680 while (bytes_left > CB_MAX_LENGTH) {
2681 status = ipw_fw_dma_add_command_block(priv,
2682 src_phys + src_offset,
2683 dest_address +
2684 dest_offset,
2685 CB_MAX_LENGTH, 0, 0);
2686 if (status) {
2687 IPW_DEBUG_FW_INFO(": Failed\n");
2688 return -1;
2689 } else
2690 IPW_DEBUG_FW_INFO(": Added new cb\n");
2692 src_offset += CB_MAX_LENGTH;
2693 dest_offset += CB_MAX_LENGTH;
2694 bytes_left -= CB_MAX_LENGTH;
2697 /* add the buffer tail */
2698 if (bytes_left > 0) {
2699 status =
2700 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2701 dest_address + dest_offset,
2702 bytes_left, 0, 0);
2703 if (status) {
2704 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2705 return -1;
2706 } else
2707 IPW_DEBUG_FW_INFO
2708 (": Adding new cb - the buffer tail\n");
2711 IPW_DEBUG_FW("<< \n");
2712 return 0;
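/*
 * A quick worked example of the chunking performed by ipw_fw_dma_add_buffer()
 * above, assuming for illustration that CB_MAX_LENGTH is 0x8000 bytes: a
 * 0x14000 byte chunk is queued as two full 0x8000 byte command blocks by the
 * while loop (leaving 0x4000 bytes), plus one 0x4000 byte command block for
 * the tail, i.e. three command blocks in total, all bounded by
 * CB_NUMBER_OF_ELEMENTS_SMALL.
 */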
2715 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2717 u32 current_index = 0, previous_index;
2718 u32 watchdog = 0;
2720 IPW_DEBUG_FW(">> : \n");
2722 current_index = ipw_fw_dma_command_block_index(priv);
2723 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2724 (int)priv->sram_desc.last_cb_index);
2726 while (current_index < priv->sram_desc.last_cb_index) {
2727 udelay(50);
2728 previous_index = current_index;
2729 current_index = ipw_fw_dma_command_block_index(priv);
2731 if (previous_index < current_index) {
2732 watchdog = 0;
2733 continue;
2735 if (++watchdog > 400) {
2736 IPW_DEBUG_FW_INFO("Timeout\n");
2737 ipw_fw_dma_dump_command_block(priv);
2738 ipw_fw_dma_abort(priv);
2739 return -1;
2743 ipw_fw_dma_abort(priv);
2745         /* Disable the DMA in the CSR register */
2746 ipw_set_bit(priv, IPW_RESET_REG,
2747 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2749 IPW_DEBUG_FW("<< dmaWaitSync \n");
2750 return 0;
2753 static void ipw_remove_current_network(struct ipw_priv *priv)
2755 struct list_head *element, *safe;
2756 struct ieee80211_network *network = NULL;
2757 unsigned long flags;
2759 spin_lock_irqsave(&priv->ieee->lock, flags);
2760 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2761 network = list_entry(element, struct ieee80211_network, list);
2762 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2763 list_del(element);
2764 list_add_tail(&network->list,
2765 &priv->ieee->network_free_list);
2768 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2772 * Check that card is still alive.
2773 * Reads debug register from domain0.
2774 * If card is present, pre-defined value should
2775 * be found there.
2777 * @param priv
2778 * @return 1 if card is present, 0 otherwise
2780 static inline int ipw_alive(struct ipw_priv *priv)
2782 return ipw_read32(priv, 0x90) == 0xd55555d5;
2785 /* timeout in msec, attempted in 10-msec quanta */
2786 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2787 int timeout)
2789 int i = 0;
2791 do {
2792 if ((ipw_read32(priv, addr) & mask) == mask)
2793 return i;
2794 mdelay(10);
2795 i += 10;
2796 } while (i < timeout);
2798 return -ETIME;
2801 /* These functions load the firmware and microcode needed to operate
2802  * the ipw hardware.  They assume the buffer has all the bits for the
2803  * image and that the caller is handling the memory allocation and cleanup.
2806 static int ipw_stop_master(struct ipw_priv *priv)
2808 int rc;
2810 IPW_DEBUG_TRACE(">> \n");
2811 /* stop master. typical delay - 0 */
2812 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2814 /* timeout is in msec, polled in 10-msec quanta */
2815 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2816 IPW_RESET_REG_MASTER_DISABLED, 100);
2817 if (rc < 0) {
2818 IPW_ERROR("wait for stop master failed after 100ms\n");
2819 return -1;
2822 IPW_DEBUG_INFO("stop master %dms\n", rc);
2824 return rc;
2827 static void ipw_arc_release(struct ipw_priv *priv)
2829 IPW_DEBUG_TRACE(">> \n");
2830 mdelay(5);
2832 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2834 /* no one knows timing, for safety add some delay */
2835 mdelay(5);
2838 struct fw_header {
2839 u32 version;
2840 u32 mode;
2843 struct fw_chunk {
2844 u32 address;
2845 u32 length;
2848 #define IPW_FW_MAJOR_VERSION 2
2849 #define IPW_FW_MINOR_VERSION 4
2851 #define IPW_FW_MINOR(x) ((x & 0xff00) >> 8)
2852 #define IPW_FW_MAJOR(x) (x & 0xff)
2854 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION)
2856 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2857 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2859 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2860 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2861 #else
2862 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2863 #endif
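/*
 * With the constants above, a firmware header version of 0x0402, for example,
 * decodes as major 2 (low byte) and minor 4 (next byte), matching
 * IPW_FW_VERSION, and IPW_FW_NAME("bss") expands to "ipw-2.4-bss.fw"
 * (the second definition covers older major/minor combinations).
 */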
2865 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2867 int rc = 0, i, addr;
2868 u8 cr = 0;
2869 u16 *image;
2871 image = (u16 *) data;
2873 IPW_DEBUG_TRACE(">> \n");
2875 rc = ipw_stop_master(priv);
2877 if (rc < 0)
2878 return rc;
2880 // spin_lock_irqsave(&priv->lock, flags);
2882 for (addr = IPW_SHARED_LOWER_BOUND;
2883 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2884 ipw_write32(priv, addr, 0);
2887 /* no ucode (yet) */
2888 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2889 /* destroy DMA queues */
2890 /* reset sequence */
2892 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2893 ipw_arc_release(priv);
2894 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2895 mdelay(1);
2897 /* reset PHY */
2898 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2899 mdelay(1);
2901 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2902 mdelay(1);
2904 /* enable ucode store */
2905 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2906 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2907 mdelay(1);
2909 /* write ucode */
2911 * @bug
2912 * Do NOT set indirect address register once and then
2913 * store data to indirect data register in the loop.
2914          * It seems very reasonable, but in this case DINO does not
2915          * accept ucode. It is essential to set the address each time.
2917 /* load new ipw uCode */
2918 for (i = 0; i < len / 2; i++)
2919 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2920 cpu_to_le16(image[i]));
2922 /* enable DINO */
2923 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2924 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2926         /* this is where the igx / win driver deviates from the VAP driver. */
2928 /* wait for alive response */
2929 for (i = 0; i < 100; i++) {
2930 /* poll for incoming data */
2931 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2932 if (cr & DINO_RXFIFO_DATA)
2933 break;
2934 mdelay(1);
2937 if (cr & DINO_RXFIFO_DATA) {
2938                 /* alive_command_response size is NOT a multiple of 4 */
2939 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2941 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2942 response_buffer[i] =
2943 le32_to_cpu(ipw_read_reg32(priv,
2944 IPW_BASEBAND_RX_FIFO_READ));
2945 memcpy(&priv->dino_alive, response_buffer,
2946 sizeof(priv->dino_alive));
2947 if (priv->dino_alive.alive_command == 1
2948 && priv->dino_alive.ucode_valid == 1) {
2949 rc = 0;
2950 IPW_DEBUG_INFO
2951 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2952 "of %02d/%02d/%02d %02d:%02d\n",
2953 priv->dino_alive.software_revision,
2954 priv->dino_alive.software_revision,
2955 priv->dino_alive.device_identifier,
2956 priv->dino_alive.device_identifier,
2957 priv->dino_alive.time_stamp[0],
2958 priv->dino_alive.time_stamp[1],
2959 priv->dino_alive.time_stamp[2],
2960 priv->dino_alive.time_stamp[3],
2961 priv->dino_alive.time_stamp[4]);
2962 } else {
2963 IPW_DEBUG_INFO("Microcode is not alive\n");
2964 rc = -EINVAL;
2966 } else {
2967 IPW_DEBUG_INFO("No alive response from DINO\n");
2968 rc = -ETIME;
2971         /* disable DINO, otherwise for some reason the
2972            firmware has problems getting the alive response */
2973 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2975 // spin_unlock_irqrestore(&priv->lock, flags);
2977 return rc;
2980 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2982 int rc = -1;
2983 int offset = 0;
2984 struct fw_chunk *chunk;
2985 dma_addr_t shared_phys;
2986 u8 *shared_virt;
2988         IPW_DEBUG_TRACE(">> : \n");
2989 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2991 if (!shared_virt)
2992 return -ENOMEM;
2994 memmove(shared_virt, data, len);
2996 /* Start the Dma */
2997 rc = ipw_fw_dma_enable(priv);
2999 if (priv->sram_desc.last_cb_index > 0) {
3000                 /* the DMA already has command blocks queued; this would be a bug */
3001 BUG();
3002 goto out;
3005 do {
3006 chunk = (struct fw_chunk *)(data + offset);
3007 offset += sizeof(struct fw_chunk);
3008 /* build DMA packet and queue up for sending */
3009                 /* dma to chunk->address, the chunk->length bytes from data +
3010                  * offset */
3011 /* Dma loading */
3012 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3013 le32_to_cpu(chunk->address),
3014 le32_to_cpu(chunk->length));
3015 if (rc) {
3016 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3017 goto out;
3020 offset += le32_to_cpu(chunk->length);
3021 } while (offset < len);
3023 /* Run the DMA and wait for the answer */
3024 rc = ipw_fw_dma_kick(priv);
3025 if (rc) {
3026 IPW_ERROR("dmaKick Failed\n");
3027 goto out;
3030 rc = ipw_fw_dma_wait(priv);
3031 if (rc) {
3032 IPW_ERROR("dmaWaitSync Failed\n");
3033 goto out;
3035 out:
3036 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3037 return rc;
3040 /* stop nic */
3041 static int ipw_stop_nic(struct ipw_priv *priv)
3043 int rc = 0;
3045 /* stop */
3046 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3048 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3049 IPW_RESET_REG_MASTER_DISABLED, 500);
3050 if (rc < 0) {
3051 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3052 return rc;
3055 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3057 return rc;
3060 static void ipw_start_nic(struct ipw_priv *priv)
3062 IPW_DEBUG_TRACE(">>\n");
3064 /* prvHwStartNic release ARC */
3065 ipw_clear_bit(priv, IPW_RESET_REG,
3066 IPW_RESET_REG_MASTER_DISABLED |
3067 IPW_RESET_REG_STOP_MASTER |
3068 CBD_RESET_REG_PRINCETON_RESET);
3070 /* enable power management */
3071 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3072 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3074 IPW_DEBUG_TRACE("<<\n");
3077 static int ipw_init_nic(struct ipw_priv *priv)
3079 int rc;
3081 IPW_DEBUG_TRACE(">>\n");
3082 /* reset */
3083 /*prvHwInitNic */
3084 /* set "initialization complete" bit to move adapter to D0 state */
3085 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3087 /* low-level PLL activation */
3088 ipw_write32(priv, IPW_READ_INT_REGISTER,
3089 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3091 /* wait for clock stabilization */
3092 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3093 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3094 if (rc < 0)
3095                 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3097 /* assert SW reset */
3098 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3100 udelay(10);
3102 /* set "initialization complete" bit to move adapter to D0 state */
3103 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3105         IPW_DEBUG_TRACE("<<\n");
3106 return 0;
3109 /* Call this function from process context, it will sleep in request_firmware.
3110 * Probe is an ok place to call this from.
3112 static int ipw_reset_nic(struct ipw_priv *priv)
3114 int rc = 0;
3115 unsigned long flags;
3117 IPW_DEBUG_TRACE(">>\n");
3119 rc = ipw_init_nic(priv);
3121 spin_lock_irqsave(&priv->lock, flags);
3122 /* Clear the 'host command active' bit... */
3123 priv->status &= ~STATUS_HCMD_ACTIVE;
3124 wake_up_interruptible(&priv->wait_command_queue);
3125 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3126 wake_up_interruptible(&priv->wait_state);
3127 spin_unlock_irqrestore(&priv->lock, flags);
3129 IPW_DEBUG_TRACE("<<\n");
3130 return rc;
3133 static int ipw_get_fw(struct ipw_priv *priv,
3134 const struct firmware **fw, const char *name)
3136 struct fw_header *header;
3137 int rc;
3139 /* ask firmware_class module to get the boot firmware off disk */
3140 rc = request_firmware(fw, name, &priv->pci_dev->dev);
3141 if (rc < 0) {
3142 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
3143 return rc;
3146 header = (struct fw_header *)(*fw)->data;
3147 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) {
3148 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
3149 name,
3150 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3151 IPW_FW_MAJOR_VERSION);
3152 return -EINVAL;
3155 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
3156 name,
3157 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3158 IPW_FW_MINOR(le32_to_cpu(header->version)),
3159 (*fw)->size - sizeof(struct fw_header));
3160 return 0;
3163 #define IPW_RX_BUF_SIZE (3000)
3165 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3166 struct ipw_rx_queue *rxq)
3168 unsigned long flags;
3169 int i;
3171 spin_lock_irqsave(&rxq->lock, flags);
3173 INIT_LIST_HEAD(&rxq->rx_free);
3174 INIT_LIST_HEAD(&rxq->rx_used);
3176 /* Fill the rx_used queue with _all_ of the Rx buffers */
3177 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3178 /* In the reset function, these buffers may have been allocated
3179 * to an SKB, so we need to unmap and free potential storage */
3180 if (rxq->pool[i].skb != NULL) {
3181 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3182 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3183 dev_kfree_skb(rxq->pool[i].skb);
3184 rxq->pool[i].skb = NULL;
3186 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3189 /* Set us so that we have processed and used all buffers, but have
3190 * not restocked the Rx queue with fresh buffers */
3191 rxq->read = rxq->write = 0;
3192 rxq->processed = RX_QUEUE_SIZE - 1;
3193 rxq->free_count = 0;
3194 spin_unlock_irqrestore(&rxq->lock, flags);
3197 #ifdef CONFIG_PM
3198 static int fw_loaded = 0;
3199 static const struct firmware *bootfw = NULL;
3200 static const struct firmware *firmware = NULL;
3201 static const struct firmware *ucode = NULL;
3203 static void free_firmware(void)
3205 if (fw_loaded) {
3206 release_firmware(bootfw);
3207 release_firmware(ucode);
3208 release_firmware(firmware);
3209 bootfw = ucode = firmware = NULL;
3210 fw_loaded = 0;
3213 #else
3214 #define free_firmware() do {} while (0)
3215 #endif
3217 static int ipw_load(struct ipw_priv *priv)
3219 #ifndef CONFIG_PM
3220 const struct firmware *bootfw = NULL;
3221 const struct firmware *firmware = NULL;
3222 const struct firmware *ucode = NULL;
3223 #endif
3224 char *ucode_name;
3225 char *fw_name;
3226 int rc = 0, retries = 3;
3228 switch (priv->ieee->iw_mode) {
3229 case IW_MODE_ADHOC:
3230 ucode_name = IPW_FW_NAME("ibss_ucode");
3231 fw_name = IPW_FW_NAME("ibss");
3232 break;
3233 #ifdef CONFIG_IPW2200_MONITOR
3234 case IW_MODE_MONITOR:
3235 ucode_name = IPW_FW_NAME("sniffer_ucode");
3236 fw_name = IPW_FW_NAME("sniffer");
3237 break;
3238 #endif
3239 case IW_MODE_INFRA:
3240 ucode_name = IPW_FW_NAME("bss_ucode");
3241 fw_name = IPW_FW_NAME("bss");
3242 break;
3243 default:
3244 rc = -EINVAL;
3247 if (rc < 0)
3248 goto error;
3250 if (!priv->rxq)
3251 priv->rxq = ipw_rx_queue_alloc(priv);
3252 else
3253 ipw_rx_queue_reset(priv, priv->rxq);
3254 if (!priv->rxq) {
3255 IPW_ERROR("Unable to initialize Rx queue\n");
3256 goto error;
3259 retry:
3260 /* Ensure interrupts are disabled */
3261 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3262 priv->status &= ~STATUS_INT_ENABLED;
3264 /* ack pending interrupts */
3265 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3267 ipw_stop_nic(priv);
3269 rc = ipw_reset_nic(priv);
3270 if (rc < 0) {
3271 IPW_ERROR("Unable to reset NIC\n");
3272 goto error;
3275 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3276 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3278 #ifdef CONFIG_PM
3279 if (!fw_loaded) {
3280 #endif
3281 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
3282 if (rc < 0)
3283 goto error;
3284 #ifdef CONFIG_PM
3286 #endif
3287 /* DMA the initial boot firmware into the device */
3288 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
3289 bootfw->size - sizeof(struct fw_header));
3290 if (rc < 0) {
3291 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3292 goto error;
3295 /* kick start the device */
3296 ipw_start_nic(priv);
3298 /* wait for the device to finish its initial startup sequence */
3299 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3300 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3301 if (rc < 0) {
3302 IPW_ERROR("device failed to boot initial fw image\n");
3303 goto error;
3305 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3307 /* ack fw init done interrupt */
3308 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3310 #ifdef CONFIG_PM
3311 if (!fw_loaded) {
3312 #endif
3313 rc = ipw_get_fw(priv, &ucode, ucode_name);
3314 if (rc < 0)
3315 goto error;
3316 #ifdef CONFIG_PM
3318 #endif
3320 /* DMA the ucode into the device */
3321 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
3322 ucode->size - sizeof(struct fw_header));
3323 if (rc < 0) {
3324 IPW_ERROR("Unable to load ucode: %d\n", rc);
3325 goto error;
3328 /* stop nic */
3329 ipw_stop_nic(priv);
3331 #ifdef CONFIG_PM
3332 if (!fw_loaded) {
3333 #endif
3334 rc = ipw_get_fw(priv, &firmware, fw_name);
3335 if (rc < 0)
3336 goto error;
3337 #ifdef CONFIG_PM
3339 #endif
3341 /* DMA bss firmware into the device */
3342 rc = ipw_load_firmware(priv, firmware->data +
3343 sizeof(struct fw_header),
3344 firmware->size - sizeof(struct fw_header));
3345 if (rc < 0) {
3346 IPW_ERROR("Unable to load firmware: %d\n", rc);
3347 goto error;
3349 #ifdef CONFIG_PM
3350 fw_loaded = 1;
3351 #endif
3353 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3355 rc = ipw_queue_reset(priv);
3356 if (rc < 0) {
3357 IPW_ERROR("Unable to initialize queues\n");
3358 goto error;
3361 /* Ensure interrupts are disabled */
3362 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3363 /* ack pending interrupts */
3364 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3366 /* kick start the device */
3367 ipw_start_nic(priv);
3369 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3370 if (retries > 0) {
3371 IPW_WARNING("Parity error. Retrying init.\n");
3372 retries--;
3373 goto retry;
3376 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3377 rc = -EIO;
3378 goto error;
3381 /* wait for the device */
3382 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3383 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3384 if (rc < 0) {
3385 IPW_ERROR("device failed to start within 500ms\n");
3386 goto error;
3388 IPW_DEBUG_INFO("device response after %dms\n", rc);
3390 /* ack fw init done interrupt */
3391 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3393 /* read eeprom data and initialize the eeprom region of sram */
3394 priv->eeprom_delay = 1;
3395 ipw_eeprom_init_sram(priv);
3397 /* enable interrupts */
3398 ipw_enable_interrupts(priv);
3400 /* Ensure our queue has valid packets */
3401 ipw_rx_queue_replenish(priv);
3403 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3405 /* ack pending interrupts */
3406 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3408 #ifndef CONFIG_PM
3409 release_firmware(bootfw);
3410 release_firmware(ucode);
3411 release_firmware(firmware);
3412 #endif
3413 return 0;
3415 error:
3416 if (priv->rxq) {
3417 ipw_rx_queue_free(priv, priv->rxq);
3418 priv->rxq = NULL;
3420 ipw_tx_queue_free(priv);
3421 if (bootfw)
3422 release_firmware(bootfw);
3423 if (ucode)
3424 release_firmware(ucode);
3425 if (firmware)
3426 release_firmware(firmware);
3427 #ifdef CONFIG_PM
3428 fw_loaded = 0;
3429 bootfw = ucode = firmware = NULL;
3430 #endif
3432 return rc;
3436 * DMA services
3438 * Theory of operation
3440  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3441  * Two empty entries are always kept in the buffer to protect from overflow.
3443  * For the Tx queue, there are low mark and high mark limits. If, after queuing
3444  * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
3445  * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
3446  * > high mark, the Tx queue is resumed.
3448 * The IPW operates with six queues, one receive queue in the device's
3449 * sram, one transmit queue for sending commands to the device firmware,
3450 * and four transmit queues for data.
3452 * The four transmit queues allow for performing quality of service (qos)
3453 * transmissions as per the 802.11 protocol. Currently Linux does not
3454 * provide a mechanism to the user for utilizing prioritized queues, so
3455 * we only utilize the first data transmit queue (queue1).
3459 * Driver allocates buffers of this size for Rx
3462 static inline int ipw_queue_space(const struct clx2_queue *q)
3464 int s = q->last_used - q->first_empty;
3465 if (s <= 0)
3466 s += q->n_bd;
3467 s -= 2; /* keep some reserve to not confuse empty and full situations */
3468 if (s < 0)
3469 s = 0;
3470 return s;
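/*
 * Worked example of the arithmetic above: with n_bd = 64, first_empty = 10
 * and last_used = 4 there are 6 descriptors in flight, so s starts at
 * 4 - 10 = -6, wraps to 58, and the 2-entry reserve leaves 56 usable slots.
 */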
3473 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3475 return (++index == n_bd) ? 0 : index;
3479 * Initialize common DMA queue structure
3481 * @param q queue to init
3482 * @param count Number of BD's to allocate. Should be power of 2
3483 * @param read_register Address for 'read' register
3484 * (not offset within BAR, full address)
3485 * @param write_register Address for 'write' register
3486 * (not offset within BAR, full address)
3487 * @param base_register Address for 'base' register
3488 * (not offset within BAR, full address)
3489 * @param size Address for 'size' register
3490 * (not offset within BAR, full address)
3492 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3493 int count, u32 read, u32 write, u32 base, u32 size)
3495 q->n_bd = count;
3497 q->low_mark = q->n_bd / 4;
3498 if (q->low_mark < 4)
3499 q->low_mark = 4;
3501 q->high_mark = q->n_bd / 8;
3502 if (q->high_mark < 2)
3503 q->high_mark = 2;
3505 q->first_empty = q->last_used = 0;
3506 q->reg_r = read;
3507 q->reg_w = write;
3509 ipw_write32(priv, base, q->dma_addr);
3510 ipw_write32(priv, size, count);
3511 ipw_write32(priv, read, 0);
3512 ipw_write32(priv, write, 0);
3514 _ipw_read32(priv, 0x90);
3517 static int ipw_queue_tx_init(struct ipw_priv *priv,
3518 struct clx2_tx_queue *q,
3519 int count, u32 read, u32 write, u32 base, u32 size)
3521 struct pci_dev *dev = priv->pci_dev;
3523 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3524 if (!q->txb) {
3525                 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3526 return -ENOMEM;
3529 q->bd =
3530 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3531 if (!q->bd) {
3532 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3533 sizeof(q->bd[0]) * count);
3534 kfree(q->txb);
3535 q->txb = NULL;
3536 return -ENOMEM;
3539 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3540 return 0;
3544  * Free one TFD, the one at index [txq->q.last_used].
3545 * Do NOT advance any indexes
3547 * @param dev
3548 * @param txq
3550 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3551 struct clx2_tx_queue *txq)
3553 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3554 struct pci_dev *dev = priv->pci_dev;
3555 int i;
3557 /* classify bd */
3558 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3559 /* nothing to cleanup after for host commands */
3560 return;
3562 /* sanity check */
3563 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3564 IPW_ERROR("Too many chunks: %i\n",
3565 le32_to_cpu(bd->u.data.num_chunks));
3566                 /** @todo issue a fatal error, it is quite a serious situation */
3567 return;
3570 /* unmap chunks if any */
3571 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3572 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3573 le16_to_cpu(bd->u.data.chunk_len[i]),
3574 PCI_DMA_TODEVICE);
3575 if (txq->txb[txq->q.last_used]) {
3576 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3577 txq->txb[txq->q.last_used] = NULL;
3583 * Deallocate DMA queue.
3585 * Empty queue by removing and destroying all BD's.
3586 * Free all buffers.
3588 * @param dev
3589 * @param q
3591 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3593 struct clx2_queue *q = &txq->q;
3594 struct pci_dev *dev = priv->pci_dev;
3596 if (q->n_bd == 0)
3597 return;
3599 /* first, empty all BD's */
3600 for (; q->first_empty != q->last_used;
3601 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3602 ipw_queue_tx_free_tfd(priv, txq);
3605 /* free buffers belonging to queue itself */
3606 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3607 q->dma_addr);
3608 kfree(txq->txb);
3610 /* 0 fill whole structure */
3611 memset(txq, 0, sizeof(*txq));
3615 * Destroy all DMA queues and structures
3617 * @param priv
3619 static void ipw_tx_queue_free(struct ipw_priv *priv)
3621 /* Tx CMD queue */
3622 ipw_queue_tx_free(priv, &priv->txq_cmd);
3624 /* Tx queues */
3625 ipw_queue_tx_free(priv, &priv->txq[0]);
3626 ipw_queue_tx_free(priv, &priv->txq[1]);
3627 ipw_queue_tx_free(priv, &priv->txq[2]);
3628 ipw_queue_tx_free(priv, &priv->txq[3]);
3631 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3633 /* First 3 bytes are manufacturer */
3634 bssid[0] = priv->mac_addr[0];
3635 bssid[1] = priv->mac_addr[1];
3636 bssid[2] = priv->mac_addr[2];
3638 /* Last bytes are random */
3639 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3641 bssid[0] &= 0xfe; /* clear multicast bit */
3642 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
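/*
 * An example of the BSSID generated above (hypothetical adapter address):
 * for a MAC of 00:0e:35:12:34:56 the ad-hoc BSSID keeps the 00:0e:35 OUI,
 * randomizes the last three bytes and ends up in the locally administered
 * unicast range, e.g. 02:0e:35:a7:4c:19.
 */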
3645 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3647 struct ipw_station_entry entry;
3648 int i;
3650 for (i = 0; i < priv->num_stations; i++) {
3651 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3652 /* Another node is active in network */
3653 priv->missed_adhoc_beacons = 0;
3654 if (!(priv->config & CFG_STATIC_CHANNEL))
3655 /* when other nodes drop out, we drop out */
3656 priv->config &= ~CFG_ADHOC_PERSIST;
3658 return i;
3662 if (i == MAX_STATIONS)
3663 return IPW_INVALID_STATION;
3665 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3667 entry.reserved = 0;
3668 entry.support_mode = 0;
3669 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3670 memcpy(priv->stations[i], bssid, ETH_ALEN);
3671 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3672 &entry, sizeof(entry));
3673 priv->num_stations++;
3675 return i;
3678 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3680 int i;
3682 for (i = 0; i < priv->num_stations; i++)
3683 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3684 return i;
3686 return IPW_INVALID_STATION;
3689 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3691 int err;
3693 if (priv->status & STATUS_ASSOCIATING) {
3694 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3695 queue_work(priv->workqueue, &priv->disassociate);
3696 return;
3699 if (!(priv->status & STATUS_ASSOCIATED)) {
3700 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3701 return;
3704         IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3705 "on channel %d.\n",
3706 MAC_ARG(priv->assoc_request.bssid),
3707 priv->assoc_request.channel);
3709 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3710 priv->status |= STATUS_DISASSOCIATING;
3712 if (quiet)
3713 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3714 else
3715 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3717 err = ipw_send_associate(priv, &priv->assoc_request);
3718 if (err) {
3719 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3720 "failed.\n");
3721 return;
3726 static int ipw_disassociate(void *data)
3728 struct ipw_priv *priv = data;
3729 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3730 return 0;
3731 ipw_send_disassociate(data, 0);
3732 return 1;
3735 static void ipw_bg_disassociate(void *data)
3737 struct ipw_priv *priv = data;
3738 mutex_lock(&priv->mutex);
3739 ipw_disassociate(data);
3740 mutex_unlock(&priv->mutex);
3743 static void ipw_system_config(void *data)
3745 struct ipw_priv *priv = data;
3746 ipw_send_system_config(priv, &priv->sys_config);
3749 struct ipw_status_code {
3750 u16 status;
3751 const char *reason;
3754 static const struct ipw_status_code ipw_status_codes[] = {
3755 {0x00, "Successful"},
3756 {0x01, "Unspecified failure"},
3757 {0x0A, "Cannot support all requested capabilities in the "
3758 "Capability information field"},
3759 {0x0B, "Reassociation denied due to inability to confirm that "
3760 "association exists"},
3761 {0x0C, "Association denied due to reason outside the scope of this "
3762 "standard"},
3763 {0x0D,
3764 "Responding station does not support the specified authentication "
3765 "algorithm"},
3766 {0x0E,
3767 "Received an Authentication frame with authentication sequence "
3768 "transaction sequence number out of expected sequence"},
3769 {0x0F, "Authentication rejected because of challenge failure"},
3770 {0x10, "Authentication rejected due to timeout waiting for next "
3771 "frame in sequence"},
3772 {0x11, "Association denied because AP is unable to handle additional "
3773 "associated stations"},
3774 {0x12,
3775 "Association denied due to requesting station not supporting all "
3776 "of the datarates in the BSSBasicServiceSet Parameter"},
3777 {0x13,
3778 "Association denied due to requesting station not supporting "
3779 "short preamble operation"},
3780 {0x14,
3781 "Association denied due to requesting station not supporting "
3782 "PBCC encoding"},
3783 {0x15,
3784 "Association denied due to requesting station not supporting "
3785 "channel agility"},
3786 {0x19,
3787 "Association denied due to requesting station not supporting "
3788 "short slot operation"},
3789 {0x1A,
3790 "Association denied due to requesting station not supporting "
3791 "DSSS-OFDM operation"},
3792 {0x28, "Invalid Information Element"},
3793 {0x29, "Group Cipher is not valid"},
3794 {0x2A, "Pairwise Cipher is not valid"},
3795 {0x2B, "AKMP is not valid"},
3796 {0x2C, "Unsupported RSN IE version"},
3797 {0x2D, "Invalid RSN IE Capabilities"},
3798 {0x2E, "Cipher suite is rejected per security policy"},
3801 #ifdef CONFIG_IPW2200_DEBUG
3802 static const char *ipw_get_status_code(u16 status)
3804 int i;
3805 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3806 if (ipw_status_codes[i].status == (status & 0xff))
3807 return ipw_status_codes[i].reason;
3808 return "Unknown status value.";
3810 #endif
3812 static inline void average_init(struct average *avg)
3814 memset(avg, 0, sizeof(*avg));
3817 static void average_add(struct average *avg, s16 val)
3819 avg->sum -= avg->entries[avg->pos];
3820 avg->sum += val;
3821 avg->entries[avg->pos++] = val;
3822 if (unlikely(avg->pos == AVG_ENTRIES)) {
3823 avg->init = 1;
3824 avg->pos = 0;
3828 static s16 average_value(struct average *avg)
3830 if (!unlikely(avg->init)) {
3831 if (avg->pos)
3832 return avg->sum / avg->pos;
3833 return 0;
3836 return avg->sum / AVG_ENTRIES;
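/*
 * Sketch of the sliding window above, assuming AVG_ENTRIES is 8: after the
 * first three samples of -60, -62 and -64 dBm (init still 0, pos == 3),
 * average_value() returns -186 / 3 = -62.  Once eight samples have been
 * added, init is set, each average_add() overwrites the oldest entry in the
 * ring and the divisor becomes the full window size.
 */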
3839 static void ipw_reset_stats(struct ipw_priv *priv)
3841 u32 len = sizeof(u32);
3843 priv->quality = 0;
3845 average_init(&priv->average_missed_beacons);
3846 average_init(&priv->average_rssi);
3847 average_init(&priv->average_noise);
3849 priv->last_rate = 0;
3850 priv->last_missed_beacons = 0;
3851 priv->last_rx_packets = 0;
3852 priv->last_tx_packets = 0;
3853 priv->last_tx_failures = 0;
3855 /* Firmware managed, reset only when NIC is restarted, so we have to
3856 * normalize on the current value */
3857 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3858 &priv->last_rx_err, &len);
3859 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3860 &priv->last_tx_failures, &len);
3862 /* Driver managed, reset with each association */
3863 priv->missed_adhoc_beacons = 0;
3864 priv->missed_beacons = 0;
3865 priv->tx_packets = 0;
3866 priv->rx_packets = 0;
3870 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3872 u32 i = 0x80000000;
3873 u32 mask = priv->rates_mask;
3874 /* If currently associated in B mode, restrict the maximum
3875 * rate match to B rates */
3876 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3877 mask &= IEEE80211_CCK_RATES_MASK;
3879 /* TODO: Verify that the rate is supported by the current rates
3880 * list. */
3882 while (i && !(mask & i))
3883 i >>= 1;
3884 switch (i) {
3885 case IEEE80211_CCK_RATE_1MB_MASK:
3886 return 1000000;
3887 case IEEE80211_CCK_RATE_2MB_MASK:
3888 return 2000000;
3889 case IEEE80211_CCK_RATE_5MB_MASK:
3890 return 5500000;
3891 case IEEE80211_OFDM_RATE_6MB_MASK:
3892 return 6000000;
3893 case IEEE80211_OFDM_RATE_9MB_MASK:
3894 return 9000000;
3895 case IEEE80211_CCK_RATE_11MB_MASK:
3896 return 11000000;
3897 case IEEE80211_OFDM_RATE_12MB_MASK:
3898 return 12000000;
3899 case IEEE80211_OFDM_RATE_18MB_MASK:
3900 return 18000000;
3901 case IEEE80211_OFDM_RATE_24MB_MASK:
3902 return 24000000;
3903 case IEEE80211_OFDM_RATE_36MB_MASK:
3904 return 36000000;
3905 case IEEE80211_OFDM_RATE_48MB_MASK:
3906 return 48000000;
3907 case IEEE80211_OFDM_RATE_54MB_MASK:
3908 return 54000000;
3911 if (priv->ieee->mode == IEEE_B)
3912 return 11000000;
3913 else
3914 return 54000000;
3917 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3919 u32 rate, len = sizeof(rate);
3920 int err;
3922 if (!(priv->status & STATUS_ASSOCIATED))
3923 return 0;
3925 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3926 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3927 &len);
3928 if (err) {
3929 IPW_DEBUG_INFO("failed querying ordinals.\n");
3930 return 0;
3932 } else
3933 return ipw_get_max_rate(priv);
3935 switch (rate) {
3936 case IPW_TX_RATE_1MB:
3937 return 1000000;
3938 case IPW_TX_RATE_2MB:
3939 return 2000000;
3940 case IPW_TX_RATE_5MB:
3941 return 5500000;
3942 case IPW_TX_RATE_6MB:
3943 return 6000000;
3944 case IPW_TX_RATE_9MB:
3945 return 9000000;
3946 case IPW_TX_RATE_11MB:
3947 return 11000000;
3948 case IPW_TX_RATE_12MB:
3949 return 12000000;
3950 case IPW_TX_RATE_18MB:
3951 return 18000000;
3952 case IPW_TX_RATE_24MB:
3953 return 24000000;
3954 case IPW_TX_RATE_36MB:
3955 return 36000000;
3956 case IPW_TX_RATE_48MB:
3957 return 48000000;
3958 case IPW_TX_RATE_54MB:
3959 return 54000000;
3962 return 0;
3965 #define IPW_STATS_INTERVAL (2 * HZ)
3966 static void ipw_gather_stats(struct ipw_priv *priv)
3968 u32 rx_err, rx_err_delta, rx_packets_delta;
3969 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3970 u32 missed_beacons_percent, missed_beacons_delta;
3971 u32 quality = 0;
3972 u32 len = sizeof(u32);
3973 s16 rssi;
3974 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3975 rate_quality;
3976 u32 max_rate;
3978 if (!(priv->status & STATUS_ASSOCIATED)) {
3979 priv->quality = 0;
3980 return;
3983 /* Update the statistics */
3984 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3985 &priv->missed_beacons, &len);
3986 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3987 priv->last_missed_beacons = priv->missed_beacons;
3988 if (priv->assoc_request.beacon_interval) {
3989 missed_beacons_percent = missed_beacons_delta *
3990 (HZ * priv->assoc_request.beacon_interval) /
3991 (IPW_STATS_INTERVAL * 10);
3992 } else {
3993 missed_beacons_percent = 0;
3995 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3997 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3998 rx_err_delta = rx_err - priv->last_rx_err;
3999 priv->last_rx_err = rx_err;
4001 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4002 tx_failures_delta = tx_failures - priv->last_tx_failures;
4003 priv->last_tx_failures = tx_failures;
4005 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4006 priv->last_rx_packets = priv->rx_packets;
4008 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4009 priv->last_tx_packets = priv->tx_packets;
4011 /* Calculate quality based on the following:
4013 * Missed beacon: 100% = 0, 0% = 70% missed
4014          * Rate: 60% = 1 Mbps, 100% = Max
4015 * Rx and Tx errors represent a straight % of total Rx/Tx
4016 * RSSI: 100% = > -50, 0% = < -80
4017 * Rx errors: 100% = 0, 0% = 50% missed
4019 * The lowest computed quality is used.
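/*
 * A short worked example of the scheme (a beacon interval of 100 TU is
 * assumed for illustration): missing 2 beacons during one IPW_STATS_INTERVAL
 * (2 seconds) gives missed_beacons_percent = 2 * 100 / 20 = 10, so
 * beacon_quality starts at 90 and, after the BEACON_THRESHOLD normalization
 * below, ends up at (90 - 5) * 100 / 95 = 89%.  A current rate of 24 Mbps
 * against a 54 Mbps maximum similarly yields
 * rate_quality = 24 * 40 / 54 + 60 = 77%.
 */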
4022 #define BEACON_THRESHOLD 5
4023 beacon_quality = 100 - missed_beacons_percent;
4024 if (beacon_quality < BEACON_THRESHOLD)
4025 beacon_quality = 0;
4026 else
4027 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4028 (100 - BEACON_THRESHOLD);
4029 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4030 beacon_quality, missed_beacons_percent);
4032 priv->last_rate = ipw_get_current_rate(priv);
4033 max_rate = ipw_get_max_rate(priv);
4034 rate_quality = priv->last_rate * 40 / max_rate + 60;
4035 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4036 rate_quality, priv->last_rate / 1000000);
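/* An error ratio is only computed once more than 100 frames moved during
 * this interval; with less traffic the quality simply defaults to 100%. */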
4038 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4039 rx_quality = 100 - (rx_err_delta * 100) /
4040 (rx_packets_delta + rx_err_delta);
4041 else
4042 rx_quality = 100;
4043 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4044 rx_quality, rx_err_delta, rx_packets_delta);
4046 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4047 tx_quality = 100 - (tx_failures_delta * 100) /
4048 (tx_packets_delta + tx_failures_delta);
4049 else
4050 tx_quality = 100;
4051 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4052 tx_quality, tx_failures_delta, tx_packets_delta);
4054 rssi = average_value(&priv->average_rssi);
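/* Map the averaged RSSI onto a percentage using a quadratic curve that
 * yields 100% at perfect_rssi and falls off toward worst_rssi; the result
 * is clamped to the 0-100 range just below. */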
4055 signal_quality =
4056 (100 *
4057 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4058 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4059 (priv->ieee->perfect_rssi - rssi) *
4060 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4061 62 * (priv->ieee->perfect_rssi - rssi))) /
4062 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4063 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4064 if (signal_quality > 100)
4065 signal_quality = 100;
4066 else if (signal_quality < 1)
4067 signal_quality = 0;
4069 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4070 signal_quality, rssi);
4072 quality = min(beacon_quality,
4073 min(rate_quality,
4074 min(tx_quality, min(rx_quality, signal_quality))));
4075 if (quality == beacon_quality)
4076 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4077 quality);
4078 if (quality == rate_quality)
4079 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4080 quality);
4081 if (quality == tx_quality)
4082 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4083 quality);
4084 if (quality == rx_quality)
4085 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4086 quality);
4087 if (quality == signal_quality)
4088 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4089 quality);
4091 priv->quality = quality;
4093 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4094 IPW_STATS_INTERVAL);
4097 static void ipw_bg_gather_stats(void *data)
4099 struct ipw_priv *priv = data;
4100 mutex_lock(&priv->mutex);
4101 ipw_gather_stats(data);
4102 mutex_unlock(&priv->mutex);
4105 /* Missed beacon behavior:
4106 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4107 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4108 * Above disassociate threshold, give up and stop scanning.
4109 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4110 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4111 int missed_count)
4113 priv->notif_missed_beacons = missed_count;
4115 if (missed_count > priv->disassociate_threshold &&
4116 priv->status & STATUS_ASSOCIATED) {
4117 /* If associated and we've hit the missed
4118 * beacon threshold, disassociate, turn
4119 * off roaming, and abort any active scans */
4120 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4121 IPW_DL_STATE | IPW_DL_ASSOC,
4122 "Missed beacon: %d - disassociate\n", missed_count);
4123 priv->status &= ~STATUS_ROAMING;
4124 if (priv->status & STATUS_SCANNING) {
4125 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4126 IPW_DL_STATE,
4127 "Aborting scan with missed beacon.\n");
4128 queue_work(priv->workqueue, &priv->abort_scan);
4131 queue_work(priv->workqueue, &priv->disassociate);
4132 return;
4135 if (priv->status & STATUS_ROAMING) {
4136 /* If we are currently roaming, then just
4137 * print a debug statement... */
4138 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4139 "Missed beacon: %d - roam in progress\n",
4140 missed_count);
4141 return;
4144 if (roaming &&
4145 (missed_count > priv->roaming_threshold &&
4146 missed_count <= priv->disassociate_threshold)) {
4147 /* If we are not already roaming, set the ROAM
4148 * bit in the status and kick off a scan.
4149 * This can happen several times before we reach
4150 * disassociate_threshold. */
4151 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4152 "Missed beacon: %d - initiate "
4153 "roaming\n", missed_count);
4154 if (!(priv->status & STATUS_ROAMING)) {
4155 priv->status |= STATUS_ROAMING;
4156 if (!(priv->status & STATUS_SCANNING))
4157 queue_work(priv->workqueue,
4158 &priv->request_scan);
4160 return;
4163 if (priv->status & STATUS_SCANNING) {
4164 /* Stop scan to keep fw from getting
4165 * stuck (only if we aren't roaming --
4166 * otherwise we'll never scan more than 2 or 3
4167 * channels..) */
4168 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4169 "Aborting scan with missed beacon.\n");
4170 queue_work(priv->workqueue, &priv->abort_scan);
4173 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4177 * Handle host notification packet.
4178 * Called from interrupt routine
4180 static void ipw_rx_notification(struct ipw_priv *priv,
4181 struct ipw_rx_notification *notif)
4183 notif->size = le16_to_cpu(notif->size);
4185 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4187 switch (notif->subtype) {
4188 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4189 struct notif_association *assoc = &notif->u.assoc;
4191 switch (assoc->state) {
4192 case CMAS_ASSOCIATED:{
4193 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4194 IPW_DL_ASSOC,
4195 "associated: '%s' " MAC_FMT
4196 " \n",
4197 escape_essid(priv->essid,
4198 priv->essid_len),
4199 MAC_ARG(priv->bssid));
4201 switch (priv->ieee->iw_mode) {
4202 case IW_MODE_INFRA:
4203 memcpy(priv->ieee->bssid,
4204 priv->bssid, ETH_ALEN);
4205 break;
4207 case IW_MODE_ADHOC:
4208 memcpy(priv->ieee->bssid,
4209 priv->bssid, ETH_ALEN);
4211 /* clear out the station table */
4212 priv->num_stations = 0;
4214 IPW_DEBUG_ASSOC
4215 ("queueing adhoc check\n");
4216 queue_delayed_work(priv->
4217 workqueue,
4218 &priv->
4219 adhoc_check,
4220 priv->
4221 assoc_request.
4222 beacon_interval);
4223 break;
4226 priv->status &= ~STATUS_ASSOCIATING;
4227 priv->status |= STATUS_ASSOCIATED;
4228 queue_work(priv->workqueue,
4229 &priv->system_config);
4231 #ifdef CONFIG_IPW_QOS
4232 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4233 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4234 if ((priv->status & STATUS_AUTH) &&
4235 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4236 == IEEE80211_STYPE_ASSOC_RESP)) {
4237 if ((sizeof
4238 (struct
4239 ieee80211_assoc_response)
4240 <= notif->size)
4241 && (notif->size <= 2314)) {
4242 struct
4243 ieee80211_rx_stats
4244 stats = {
4245 .len =
4246 notif->
4247 size - 1,
4250 IPW_DEBUG_QOS
4251 ("QoS Associate "
4252 "size %d\n",
4253 notif->size);
4254 ieee80211_rx_mgt(priv->
4255 ieee,
4256 (struct
4257 ieee80211_hdr_4addr
4259 &notif->u.raw, &stats);
4262 #endif
4264 schedule_work(&priv->link_up);
4266 break;
4269 case CMAS_AUTHENTICATED:{
4270 if (priv->
4271 status & (STATUS_ASSOCIATED |
4272 STATUS_AUTH)) {
4273 #ifdef CONFIG_IPW2200_DEBUG
4274 struct notif_authenticate *auth
4275 = &notif->u.auth;
4276 IPW_DEBUG(IPW_DL_NOTIF |
4277 IPW_DL_STATE |
4278 IPW_DL_ASSOC,
4279 "deauthenticated: '%s' "
4280 MAC_FMT
4281 ": (0x%04X) - %s \n",
4282 escape_essid(priv->
4283 essid,
4284 priv->
4285 essid_len),
4286 MAC_ARG(priv->bssid),
4287 ntohs(auth->status),
4288 ipw_get_status_code
4289 (ntohs
4290 (auth->status)));
4291 #endif
4293 priv->status &=
4294 ~(STATUS_ASSOCIATING |
4295 STATUS_AUTH |
4296 STATUS_ASSOCIATED);
4298 schedule_work(&priv->link_down);
4299 break;
4302 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4303 IPW_DL_ASSOC,
4304 "authenticated: '%s' " MAC_FMT
4305 "\n",
4306 escape_essid(priv->essid,
4307 priv->essid_len),
4308 MAC_ARG(priv->bssid));
4309 break;
4312 case CMAS_INIT:{
4313 if (priv->status & STATUS_AUTH) {
4314 struct
4315 ieee80211_assoc_response
4316 *resp;
4317 resp =
4318 (struct
4319 ieee80211_assoc_response
4320 *)&notif->u.raw;
4321 IPW_DEBUG(IPW_DL_NOTIF |
4322 IPW_DL_STATE |
4323 IPW_DL_ASSOC,
4324 "association failed (0x%04X): %s\n",
4325 ntohs(resp->status),
4326 ipw_get_status_code
4327 (ntohs
4328 (resp->status)));
4331 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4332 IPW_DL_ASSOC,
4333 "disassociated: '%s' " MAC_FMT
4334 " \n",
4335 escape_essid(priv->essid,
4336 priv->essid_len),
4337 MAC_ARG(priv->bssid));
4339 priv->status &=
4340 ~(STATUS_DISASSOCIATING |
4341 STATUS_ASSOCIATING |
4342 STATUS_ASSOCIATED | STATUS_AUTH);
4343 if (priv->assoc_network
4344 && (priv->assoc_network->
4345 capability &
4346 WLAN_CAPABILITY_IBSS))
4347 ipw_remove_current_network
4348 (priv);
4350 schedule_work(&priv->link_down);
4352 break;
4355 case CMAS_RX_ASSOC_RESP:
4356 break;
4358 default:
4359 IPW_ERROR("assoc: unknown (%d)\n",
4360 assoc->state);
4361 break;
4364 break;
4367 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4368 struct notif_authenticate *auth = &notif->u.auth;
4369 switch (auth->state) {
4370 case CMAS_AUTHENTICATED:
4371 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4372 "authenticated: '%s' " MAC_FMT " \n",
4373 escape_essid(priv->essid,
4374 priv->essid_len),
4375 MAC_ARG(priv->bssid));
4376 priv->status |= STATUS_AUTH;
4377 break;
4379 case CMAS_INIT:
4380 if (priv->status & STATUS_AUTH) {
4381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4382 IPW_DL_ASSOC,
4383 "authentication failed (0x%04X): %s\n",
4384 ntohs(auth->status),
4385 ipw_get_status_code(ntohs
4386 (auth->
4387 status)));
4389 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4390 IPW_DL_ASSOC,
4391 "deauthenticated: '%s' " MAC_FMT "\n",
4392 escape_essid(priv->essid,
4393 priv->essid_len),
4394 MAC_ARG(priv->bssid));
4396 priv->status &= ~(STATUS_ASSOCIATING |
4397 STATUS_AUTH |
4398 STATUS_ASSOCIATED);
4400 schedule_work(&priv->link_down);
4401 break;
4403 case CMAS_TX_AUTH_SEQ_1:
4404 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4405 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4406 break;
4407 case CMAS_RX_AUTH_SEQ_2:
4408 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4409 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4410 break;
4411 case CMAS_AUTH_SEQ_1_PASS:
4412 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4414 break;
4415 case CMAS_AUTH_SEQ_1_FAIL:
4416 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4417 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4418 break;
4419 case CMAS_TX_AUTH_SEQ_3:
4420 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4421 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4422 break;
4423 case CMAS_RX_AUTH_SEQ_4:
4424 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4425 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4426 break;
4427 case CMAS_AUTH_SEQ_2_PASS:
4428 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4429 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4430 break;
4431 case CMAS_AUTH_SEQ_2_FAIL:
4432 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4433 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4434 break;
4435 case CMAS_TX_ASSOC:
4436 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4437 IPW_DL_ASSOC, "TX_ASSOC\n");
4438 break;
4439 case CMAS_RX_ASSOC_RESP:
4440 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4441 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4443 break;
4444 case CMAS_ASSOCIATED:
4445 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4446 IPW_DL_ASSOC, "ASSOCIATED\n");
4447 break;
4448 default:
4449 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4450 auth->state);
4451 break;
4453 break;
4456 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4457 struct notif_channel_result *x =
4458 &notif->u.channel_result;
4460 if (notif->size == sizeof(*x)) {
4461 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4462 x->channel_num);
4463 } else {
4464 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4465 "(should be %zd)\n",
4466 notif->size, sizeof(*x));
4468 break;
4471 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4472 struct notif_scan_complete *x = &notif->u.scan_complete;
4473 if (notif->size == sizeof(*x)) {
4474 IPW_DEBUG_SCAN
4475 ("Scan completed: type %d, %d channels, "
4476 "%d status\n", x->scan_type,
4477 x->num_channels, x->status);
4478 } else {
4479 IPW_ERROR("Scan completed of wrong size %d "
4480 "(should be %zd)\n",
4481 notif->size, sizeof(*x));
4484 priv->status &=
4485 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4487 wake_up_interruptible(&priv->wait_state);
4488 cancel_delayed_work(&priv->scan_check);
4490 if (priv->status & STATUS_EXIT_PENDING)
4491 break;
4493 priv->ieee->scans++;
4495 #ifdef CONFIG_IPW2200_MONITOR
4496 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4497 priv->status |= STATUS_SCAN_FORCED;
4498 queue_work(priv->workqueue,
4499 &priv->request_scan);
4500 break;
4502 priv->status &= ~STATUS_SCAN_FORCED;
4503 #endif /* CONFIG_IPW2200_MONITOR */
4505 if (!(priv->status & (STATUS_ASSOCIATED |
4506 STATUS_ASSOCIATING |
4507 STATUS_ROAMING |
4508 STATUS_DISASSOCIATING)))
4509 queue_work(priv->workqueue, &priv->associate);
4510 else if (priv->status & STATUS_ROAMING) {
4511 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4512 /* If a scan completed and we are in roam mode, then
4513 * the scan that completed was the one requested as a
4514 * result of entering roam... so, schedule the
4515 * roam work */
4516 queue_work(priv->workqueue,
4517 &priv->roam);
4518 else
4519 /* Don't schedule if we aborted the scan */
4520 priv->status &= ~STATUS_ROAMING;
4521 } else if (priv->status & STATUS_SCAN_PENDING)
4522 queue_work(priv->workqueue,
4523 &priv->request_scan);
4524 else if (priv->config & CFG_BACKGROUND_SCAN
4525 && priv->status & STATUS_ASSOCIATED)
4526 queue_delayed_work(priv->workqueue,
4527 &priv->request_scan, HZ);
4528 break;
4531 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4532 struct notif_frag_length *x = &notif->u.frag_len;
4534 if (notif->size == sizeof(*x))
4535 IPW_ERROR("Frag length: %d\n",
4536 le16_to_cpu(x->frag_length));
4537 else
4538 IPW_ERROR("Frag length of wrong size %d "
4539 "(should be %zd)\n",
4540 notif->size, sizeof(*x));
4541 break;
4544 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4545 struct notif_link_deterioration *x =
4546 &notif->u.link_deterioration;
4548 if (notif->size == sizeof(*x)) {
4549 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4550 "link deterioration: '%s' " MAC_FMT
4551 " \n", escape_essid(priv->essid,
4552 priv->essid_len),
4553 MAC_ARG(priv->bssid));
4554 memcpy(&priv->last_link_deterioration, x,
4555 sizeof(*x));
4556 } else {
4557 IPW_ERROR("Link Deterioration of wrong size %d "
4558 "(should be %zd)\n",
4559 notif->size, sizeof(*x));
4561 break;
4564 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4565 IPW_ERROR("Dino config\n");
4566 if (priv->hcmd
4567 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4568 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4570 break;
4573 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4574 struct notif_beacon_state *x = &notif->u.beacon_state;
4575 if (notif->size != sizeof(*x)) {
4576 IPW_ERROR
4577 ("Beacon state of wrong size %d (should "
4578 "be %zd)\n", notif->size, sizeof(*x));
4579 break;
4582 if (le32_to_cpu(x->state) ==
4583 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4584 ipw_handle_missed_beacon(priv,
4585 le32_to_cpu(x->
4586 number));
4588 break;
4591 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4592 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4593 if (notif->size == sizeof(*x)) {
4594 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4595 "0x%02x station %d\n",
4596 x->key_state, x->security_type,
4597 x->station_index);
4598 break;
4601 IPW_ERROR
4602 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4603 notif->size, sizeof(*x));
4604 break;
4607 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4608 struct notif_calibration *x = &notif->u.calibration;
4610 if (notif->size == sizeof(*x)) {
4611 memcpy(&priv->calib, x, sizeof(*x));
4612 IPW_DEBUG_INFO("TODO: Calibration\n");
4613 break;
4616 IPW_ERROR
4617 ("Calibration of wrong size %d (should be %zd)\n",
4618 notif->size, sizeof(*x));
4619 break;
4622 case HOST_NOTIFICATION_NOISE_STATS:{
4623 if (notif->size == sizeof(u32)) {
4624 priv->last_noise =
4625 (u8) (le32_to_cpu(notif->u.noise.value) &
4626 0xff);
4627 average_add(&priv->average_noise,
4628 priv->last_noise);
4629 break;
4632 IPW_ERROR
4633 ("Noise stat is wrong size %d (should be %zd)\n",
4634 notif->size, sizeof(u32));
4635 break;
4638 default:
4639 IPW_DEBUG_NOTIF("Unknown notification: "
4640 "subtype=%d,flags=0x%2x,size=%d\n",
4641 notif->subtype, notif->flags, notif->size);
4646 * Destroys all DMA structures and initialises them again
4648 * @param priv
4649 * @return error code
4651 static int ipw_queue_reset(struct ipw_priv *priv)
4653 int rc = 0;
4654 /** @todo customize queue sizes */
4655 int nTx = 64, nTxCmd = 8;
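/* One host-command queue of nTxCmd BDs plus four data queues of nTx BDs
 * each are torn down and rebuilt below. */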
4656 ipw_tx_queue_free(priv);
4657 /* Tx CMD queue */
4658 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4659 IPW_TX_CMD_QUEUE_READ_INDEX,
4660 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4661 IPW_TX_CMD_QUEUE_BD_BASE,
4662 IPW_TX_CMD_QUEUE_BD_SIZE);
4663 if (rc) {
4664 IPW_ERROR("Tx Cmd queue init failed\n");
4665 goto error;
4667 /* Tx queue(s) */
4668 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4669 IPW_TX_QUEUE_0_READ_INDEX,
4670 IPW_TX_QUEUE_0_WRITE_INDEX,
4671 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4672 if (rc) {
4673 IPW_ERROR("Tx 0 queue init failed\n");
4674 goto error;
4676 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4677 IPW_TX_QUEUE_1_READ_INDEX,
4678 IPW_TX_QUEUE_1_WRITE_INDEX,
4679 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4680 if (rc) {
4681 IPW_ERROR("Tx 1 queue init failed\n");
4682 goto error;
4684 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4685 IPW_TX_QUEUE_2_READ_INDEX,
4686 IPW_TX_QUEUE_2_WRITE_INDEX,
4687 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4688 if (rc) {
4689 IPW_ERROR("Tx 2 queue init failed\n");
4690 goto error;
4692 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4693 IPW_TX_QUEUE_3_READ_INDEX,
4694 IPW_TX_QUEUE_3_WRITE_INDEX,
4695 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4696 if (rc) {
4697 IPW_ERROR("Tx 3 queue init failed\n");
4698 goto error;
4700 /* statistics */
4701 priv->rx_bufs_min = 0;
4702 priv->rx_pend_max = 0;
4703 return rc;
4705 error:
4706 ipw_tx_queue_free(priv);
4707 return rc;
4711 * Reclaim Tx queue entries no more used by NIC.
4713 * When FW advances 'R' index, all entries between old and
4714 * new 'R' index need to be reclaimed. As a result, some free space
4715 * forms. If there is enough free space (> low mark), wake Tx queue.
4717 * @note Need to protect against garbage in 'R' index
4718 * @param priv
4719 * @param txq
4720 * @param qindex
4721 * @return Number of used entries remaining in the queue
4723 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4724 struct clx2_tx_queue *txq, int qindex)
4726 u32 hw_tail;
4727 int used;
4728 struct clx2_queue *q = &txq->q;
4730 hw_tail = ipw_read32(priv, q->reg_r);
4731 if (hw_tail >= q->n_bd) {
4732 IPW_ERROR
4733 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4734 hw_tail, q->n_bd);
4735 goto done;
4737 for (; q->last_used != hw_tail;
4738 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4739 ipw_queue_tx_free_tfd(priv, txq);
4740 priv->tx_packets++;
4742 done:
4743 if ((ipw_queue_space(q) > q->low_mark) &&
4744 (qindex >= 0) &&
4745 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4746 netif_wake_queue(priv->net_dev);
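/* first_empty and last_used are indexes into a ring of n_bd entries, so
 * the difference can go negative after a wrap; normalise it below. */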
4747 used = q->first_empty - q->last_used;
4748 if (used < 0)
4749 used += q->n_bd;
4751 return used;
4754 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4755 int len, int sync)
4757 struct clx2_tx_queue *txq = &priv->txq_cmd;
4758 struct clx2_queue *q = &txq->q;
4759 struct tfd_frame *tfd;
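/* A synchronous command only needs one free slot; an asynchronous one
 * must leave an extra slot in reserve. */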
4761 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4762 IPW_ERROR("No space for Tx\n");
4763 return -EBUSY;
4766 tfd = &txq->bd[q->first_empty];
4767 txq->txb[q->first_empty] = NULL;
4769 memset(tfd, 0, sizeof(*tfd));
4770 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4771 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4772 priv->hcmd_seq++;
4773 tfd->u.cmd.index = hcmd;
4774 tfd->u.cmd.length = len;
4775 memcpy(tfd->u.cmd.payload, buf, len);
4776 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4777 ipw_write32(priv, q->reg_w, q->first_empty);
4778 _ipw_read32(priv, 0x90);
4780 return 0;
4784 * Rx theory of operation
4786 * The host allocates 32 DMA target addresses and passes the host address
4787 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4788 * 0 to 31
4790 * Rx Queue Indexes
4791 * The host/firmware share two index registers for managing the Rx buffers.
4793 * The READ index maps to the first position that the firmware may be writing
4794 * to -- the driver can read up to (but not including) this position and get
4795 * good data.
4796 * The READ index is managed by the firmware once the card is enabled.
4798 * The WRITE index maps to the last position the driver has read from -- the
4799 * position preceding WRITE is the last slot the firmware can place a packet.
4801 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4802 * WRITE = READ.
4804 * During initialization the host sets up the READ queue position to the first
4805 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4807 * When the firmware places a packet in a buffer it will advance the READ index
4808 * and fire the RX interrupt. The driver can then query the READ index and
4809 * process as many packets as possible, moving the WRITE index forward as it
4810 * resets the Rx queue buffers with new memory.
4812 * The management in the driver is as follows:
4813 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4814 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4815 * to replenish the ipw->rxq->rx_free.
4816 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4817 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4818 * 'processed' and 'read' driver indexes as well)
4819 * + A received packet is processed and handed to the kernel network stack,
4820 * detached from the ipw->rxq. The driver 'processed' index is updated.
4821 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4822 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4823 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4824 * were enough free buffers and RX_STALLED is set it is cleared.
4827 * Driver sequence:
4829 * ipw_rx_queue_alloc() Allocates rx_free
4830 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4831 * ipw_rx_queue_restock
4832 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4833 * queue, updates firmware pointers, and updates
4834 * the WRITE index. If insufficient rx_free buffers
4835 * are available, schedules ipw_rx_queue_replenish
4837 * -- enable interrupts --
4838 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4839 * READ INDEX, detaching the SKB from the pool.
4840 * Moves the packet buffer from queue to rx_used.
4841 * Calls ipw_rx_queue_restock to refill any empty
4842 * slots.
4843 * ...
4848 * If there are slots in the RX queue that need to be restocked,
4849 * and we have free pre-allocated buffers, fill the ranks as much
4850 * as we can pulling from rx_free.
4852 * This moves the 'write' index forward to catch up with 'processed', and
4853 * also updates the memory address in the firmware to reference the new
4854 * target buffer.
4856 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4858 struct ipw_rx_queue *rxq = priv->rxq;
4859 struct list_head *element;
4860 struct ipw_rx_mem_buffer *rxb;
4861 unsigned long flags;
4862 int write;
4864 spin_lock_irqsave(&rxq->lock, flags);
4865 write = rxq->write;
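/* Hand pre-allocated buffers back to the firmware until the write index
 * catches up with 'processed' or rx_free runs dry. */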
4866 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4867 element = rxq->rx_free.next;
4868 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4869 list_del(element);
4871 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4872 rxb->dma_addr);
4873 rxq->queue[rxq->write] = rxb;
4874 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4875 rxq->free_count--;
4877 spin_unlock_irqrestore(&rxq->lock, flags);
4879 /* If the pre-allocated buffer pool is dropping low, schedule to
4880 * refill it */
4881 if (rxq->free_count <= RX_LOW_WATERMARK)
4882 queue_work(priv->workqueue, &priv->rx_replenish);
4884 /* If we've added more space for the firmware to place data, tell it */
4885 if (write != rxq->write)
4886 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4890 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
4891 * Also restock the Rx queue via ipw_rx_queue_restock.
4893 * This is called as a scheduled work item (except during initialization)
4895 static void ipw_rx_queue_replenish(void *data)
4897 struct ipw_priv *priv = data;
4898 struct ipw_rx_queue *rxq = priv->rxq;
4899 struct list_head *element;
4900 struct ipw_rx_mem_buffer *rxb;
4901 unsigned long flags;
4903 spin_lock_irqsave(&rxq->lock, flags);
4904 while (!list_empty(&rxq->rx_used)) {
4905 element = rxq->rx_used.next;
4906 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4907 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4908 if (!rxb->skb) {
4909 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4910 priv->net_dev->name);
4911 /* We don't reschedule replenish work here -- we will
4912 * call the restock method and if it still needs
4913 * more buffers it will schedule replenish */
4914 break;
4916 list_del(element);
4918 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4919 rxb->dma_addr =
4920 pci_map_single(priv->pci_dev, rxb->skb->data,
4921 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4923 list_add_tail(&rxb->list, &rxq->rx_free);
4924 rxq->free_count++;
4926 spin_unlock_irqrestore(&rxq->lock, flags);
4928 ipw_rx_queue_restock(priv);
4931 static void ipw_bg_rx_queue_replenish(void *data)
4933 struct ipw_priv *priv = data;
4934 mutex_lock(&priv->mutex);
4935 ipw_rx_queue_replenish(data);
4936 mutex_unlock(&priv->mutex);
4939 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4940 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4941 * This free routine walks the list of POOL entries and, if the SKB is
4942 * non-NULL, unmaps and frees it.
4944 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4946 int i;
4948 if (!rxq)
4949 return;
4951 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4952 if (rxq->pool[i].skb != NULL) {
4953 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4954 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4955 dev_kfree_skb(rxq->pool[i].skb);
4959 kfree(rxq);
4962 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4964 struct ipw_rx_queue *rxq;
4965 int i;
4967 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4968 if (unlikely(!rxq)) {
4969 IPW_ERROR("memory allocation failed\n");
4970 return NULL;
4972 spin_lock_init(&rxq->lock);
4973 INIT_LIST_HEAD(&rxq->rx_free);
4974 INIT_LIST_HEAD(&rxq->rx_used);
4976 /* Fill the rx_used queue with _all_ of the Rx buffers */
4977 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4978 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4980 /* Set us so that we have processed and used all buffers, but have
4981 * not restocked the Rx queue with fresh buffers */
4982 rxq->read = rxq->write = 0;
4983 rxq->processed = RX_QUEUE_SIZE - 1;
4984 rxq->free_count = 0;
4986 return rxq;
4989 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
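/* Strip the basic-rate flag so the switches below compare plain rate
 * values. */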
4991 rate &= ~IEEE80211_BASIC_RATE_MASK;
4992 if (ieee_mode == IEEE_A) {
4993 switch (rate) {
4994 case IEEE80211_OFDM_RATE_6MB:
4995 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4996 1 : 0;
4997 case IEEE80211_OFDM_RATE_9MB:
4998 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4999 1 : 0;
5000 case IEEE80211_OFDM_RATE_12MB:
5001 return priv->
5002 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5003 case IEEE80211_OFDM_RATE_18MB:
5004 return priv->
5005 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5006 case IEEE80211_OFDM_RATE_24MB:
5007 return priv->
5008 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5009 case IEEE80211_OFDM_RATE_36MB:
5010 return priv->
5011 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5012 case IEEE80211_OFDM_RATE_48MB:
5013 return priv->
5014 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5015 case IEEE80211_OFDM_RATE_54MB:
5016 return priv->
5017 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5018 default:
5019 return 0;
5023 /* B and G mixed */
5024 switch (rate) {
5025 case IEEE80211_CCK_RATE_1MB:
5026 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5027 case IEEE80211_CCK_RATE_2MB:
5028 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5029 case IEEE80211_CCK_RATE_5MB:
5030 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5031 case IEEE80211_CCK_RATE_11MB:
5032 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5035 /* If we are limited to B modulations, bail at this point */
5036 if (ieee_mode == IEEE_B)
5037 return 0;
5039 /* G */
5040 switch (rate) {
5041 case IEEE80211_OFDM_RATE_6MB:
5042 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5043 case IEEE80211_OFDM_RATE_9MB:
5044 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5045 case IEEE80211_OFDM_RATE_12MB:
5046 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5047 case IEEE80211_OFDM_RATE_18MB:
5048 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5049 case IEEE80211_OFDM_RATE_24MB:
5050 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5051 case IEEE80211_OFDM_RATE_36MB:
5052 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5053 case IEEE80211_OFDM_RATE_48MB:
5054 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5055 case IEEE80211_OFDM_RATE_54MB:
5056 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5059 return 0;
5062 static int ipw_compatible_rates(struct ipw_priv *priv,
5063 const struct ieee80211_network *network,
5064 struct ipw_supported_rates *rates)
5066 int num_rates, i;
5068 memset(rates, 0, sizeof(*rates));
5069 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5070 rates->num_rates = 0;
5071 for (i = 0; i < num_rates; i++) {
5072 if (!ipw_is_rate_in_mask(priv, network->mode,
5073 network->rates[i])) {
5075 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5076 IPW_DEBUG_SCAN("Adding masked mandatory "
5077 "rate %02X\n",
5078 network->rates[i]);
5079 rates->supported_rates[rates->num_rates++] =
5080 network->rates[i];
5081 continue;
5084 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5085 network->rates[i], priv->rates_mask);
5086 continue;
5089 rates->supported_rates[rates->num_rates++] = network->rates[i];
5092 num_rates = min(network->rates_ex_len,
5093 (u8) (IPW_MAX_RATES - num_rates));
5094 for (i = 0; i < num_rates; i++) {
5095 if (!ipw_is_rate_in_mask(priv, network->mode,
5096 network->rates_ex[i])) {
5097 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5098 IPW_DEBUG_SCAN("Adding masked mandatory "
5099 "rate %02X\n",
5100 network->rates_ex[i]);
5101 rates->supported_rates[rates->num_rates++] =
5102 network->rates_ex[i];
5103 continue;
5106 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5107 network->rates_ex[i], priv->rates_mask);
5108 continue;
5111 rates->supported_rates[rates->num_rates++] =
5112 network->rates_ex[i];
5115 return 1;
5118 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5119 const struct ipw_supported_rates *src)
5121 u8 i;
5122 for (i = 0; i < src->num_rates; i++)
5123 dest->supported_rates[i] = src->supported_rates[i];
5124 dest->num_rates = src->num_rates;
5127 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5128 * mask should ever be used -- right now all callers that add the scan rates
5129 * pass modulation = CCK, so BASIC_RATE_MASK is never set... */
5130 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5131 u8 modulation, u32 rate_mask)
5133 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5134 IEEE80211_BASIC_RATE_MASK : 0;
5136 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5137 rates->supported_rates[rates->num_rates++] =
5138 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5140 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5141 rates->supported_rates[rates->num_rates++] =
5142 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5144 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5145 rates->supported_rates[rates->num_rates++] = basic_mask |
5146 IEEE80211_CCK_RATE_5MB;
5148 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5149 rates->supported_rates[rates->num_rates++] = basic_mask |
5150 IEEE80211_CCK_RATE_11MB;
5153 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5154 u8 modulation, u32 rate_mask)
5156 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5157 IEEE80211_BASIC_RATE_MASK : 0;
5159 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5160 rates->supported_rates[rates->num_rates++] = basic_mask |
5161 IEEE80211_OFDM_RATE_6MB;
5163 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5164 rates->supported_rates[rates->num_rates++] =
5165 IEEE80211_OFDM_RATE_9MB;
5167 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5168 rates->supported_rates[rates->num_rates++] = basic_mask |
5169 IEEE80211_OFDM_RATE_12MB;
5171 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5172 rates->supported_rates[rates->num_rates++] =
5173 IEEE80211_OFDM_RATE_18MB;
5175 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5176 rates->supported_rates[rates->num_rates++] = basic_mask |
5177 IEEE80211_OFDM_RATE_24MB;
5179 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5180 rates->supported_rates[rates->num_rates++] =
5181 IEEE80211_OFDM_RATE_36MB;
5183 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5184 rates->supported_rates[rates->num_rates++] =
5185 IEEE80211_OFDM_RATE_48MB;
5187 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5188 rates->supported_rates[rates->num_rates++] =
5189 IEEE80211_OFDM_RATE_54MB;
5192 struct ipw_network_match {
5193 struct ieee80211_network *network;
5194 struct ipw_supported_rates rates;
5197 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5198 struct ipw_network_match *match,
5199 struct ieee80211_network *network,
5200 int roaming)
5202 struct ipw_supported_rates rates;
5204 /* Verify that this network's capability is compatible with the
5205 * current mode (AdHoc or Infrastructure) */
5206 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5207 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5208 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5209 "capability mismatch.\n",
5210 escape_essid(network->ssid, network->ssid_len),
5211 MAC_ARG(network->bssid));
5212 return 0;
5215 /* If we do not have an ESSID for this AP, we can not associate with
5216 * it */
5217 if (network->flags & NETWORK_EMPTY_ESSID) {
5218 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5219 "because of hidden ESSID.\n",
5220 escape_essid(network->ssid, network->ssid_len),
5221 MAC_ARG(network->bssid));
5222 return 0;
5225 if (unlikely(roaming)) {
5226 /* If we are roaming, then check whether this is a valid
5227 * network to try and roam to */
5228 if ((network->ssid_len != match->network->ssid_len) ||
5229 memcmp(network->ssid, match->network->ssid,
5230 network->ssid_len)) {
5231 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5232 "because of non-network ESSID.\n",
5233 escape_essid(network->ssid,
5234 network->ssid_len),
5235 MAC_ARG(network->bssid));
5236 return 0;
5238 } else {
5239 /* If an ESSID has been configured then compare the broadcast
5240 * ESSID to ours */
5241 if ((priv->config & CFG_STATIC_ESSID) &&
5242 ((network->ssid_len != priv->essid_len) ||
5243 memcmp(network->ssid, priv->essid,
5244 min(network->ssid_len, priv->essid_len)))) {
5245 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5247 strncpy(escaped,
5248 escape_essid(network->ssid, network->ssid_len),
5249 sizeof(escaped));
5250 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5251 "because of ESSID mismatch: '%s'.\n",
5252 escaped, MAC_ARG(network->bssid),
5253 escape_essid(priv->essid,
5254 priv->essid_len));
5255 return 0;
5259 /* If this network is newer (lower TSF timestamp) than the current match,
5260 * don't bother testing everything else. */
5262 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5263 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5264 "current network.\n",
5265 escape_essid(match->network->ssid,
5266 match->network->ssid_len));
5267 return 0;
5268 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5269 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5270 "current network.\n",
5271 escape_essid(match->network->ssid,
5272 match->network->ssid_len));
5273 return 0;
5276 /* Now go through and see if the requested network is valid... */
5277 if (priv->ieee->scan_age != 0 &&
5278 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5279 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5280 "because of age: %ums.\n",
5281 escape_essid(network->ssid, network->ssid_len),
5282 MAC_ARG(network->bssid),
5283 jiffies_to_msecs(jiffies -
5284 network->last_scanned));
5285 return 0;
5288 if ((priv->config & CFG_STATIC_CHANNEL) &&
5289 (network->channel != priv->channel)) {
5290 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5291 "because of channel mismatch: %d != %d.\n",
5292 escape_essid(network->ssid, network->ssid_len),
5293 MAC_ARG(network->bssid),
5294 network->channel, priv->channel);
5295 return 0;
5298 /* Verify privacy compatibility */
5299 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5300 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5301 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5302 "because of privacy mismatch: %s != %s.\n",
5303 escape_essid(network->ssid, network->ssid_len),
5304 MAC_ARG(network->bssid),
5305 priv->
5306 capability & CAP_PRIVACY_ON ? "on" : "off",
5307 network->
5308 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5309 "off");
5310 return 0;
5313 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5314 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5315 "because of the same BSSID match: " MAC_FMT
5316 ".\n", escape_essid(network->ssid,
5317 network->ssid_len),
5318 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5319 return 0;
5322 /* Filter out any incompatible freq / mode combinations */
5323 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5324 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5325 "because of invalid frequency/mode "
5326 "combination.\n",
5327 escape_essid(network->ssid, network->ssid_len),
5328 MAC_ARG(network->bssid));
5329 return 0;
5332 /* Ensure that the rates supported by the driver are compatible with
5333 * this AP, including verification of basic rates (mandatory) */
5334 if (!ipw_compatible_rates(priv, network, &rates)) {
5335 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5336 "because configured rate mask excludes "
5337 "AP mandatory rate.\n",
5338 escape_essid(network->ssid, network->ssid_len),
5339 MAC_ARG(network->bssid));
5340 return 0;
5343 if (rates.num_rates == 0) {
5344 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5345 "because of no compatible rates.\n",
5346 escape_essid(network->ssid, network->ssid_len),
5347 MAC_ARG(network->bssid));
5348 return 0;
5351 /* TODO: Perform any further minimal comparative tests. We do not
5352 * want to put too much policy logic here; intelligent scan selection
5353 * should occur within a generic IEEE 802.11 user space tool. */
5355 /* Set up 'new' AP to this network */
5356 ipw_copy_rates(&match->rates, &rates);
5357 match->network = network;
5358 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5359 escape_essid(network->ssid, network->ssid_len),
5360 MAC_ARG(network->bssid));
5362 return 1;
5365 static void ipw_merge_adhoc_network(void *data)
5367 struct ipw_priv *priv = data;
5368 struct ieee80211_network *network = NULL;
5369 struct ipw_network_match match = {
5370 .network = priv->assoc_network
5373 if ((priv->status & STATUS_ASSOCIATED) &&
5374 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5375 /* First pass through ROAM process -- look for a better
5376 * network */
5377 unsigned long flags;
5379 spin_lock_irqsave(&priv->ieee->lock, flags);
5380 list_for_each_entry(network, &priv->ieee->network_list, list) {
5381 if (network != priv->assoc_network)
5382 ipw_find_adhoc_network(priv, &match, network,
5385 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5387 if (match.network == priv->assoc_network) {
5388 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5389 "merge to.\n");
5390 return;
5393 mutex_lock(&priv->mutex);
5394 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5395 IPW_DEBUG_MERGE("remove network %s\n",
5396 escape_essid(priv->essid,
5397 priv->essid_len));
5398 ipw_remove_current_network(priv);
5401 ipw_disassociate(priv);
5402 priv->assoc_network = match.network;
5403 mutex_unlock(&priv->mutex);
5404 return;
5408 static int ipw_best_network(struct ipw_priv *priv,
5409 struct ipw_network_match *match,
5410 struct ieee80211_network *network, int roaming)
5412 struct ipw_supported_rates rates;
5414 /* Verify that this network's capability is compatible with the
5415 * current mode (AdHoc or Infrastructure) */
5416 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5417 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5418 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5419 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5420 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5421 "capability mismatch.\n",
5422 escape_essid(network->ssid, network->ssid_len),
5423 MAC_ARG(network->bssid));
5424 return 0;
5427 /* If we do not have an ESSID for this AP, we can not associate with
5428 * it */
5429 if (network->flags & NETWORK_EMPTY_ESSID) {
5430 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5431 "because of hidden ESSID.\n",
5432 escape_essid(network->ssid, network->ssid_len),
5433 MAC_ARG(network->bssid));
5434 return 0;
5437 if (unlikely(roaming)) {
5438 /* If we are roaming, then check whether this is a valid
5439 * network to try and roam to */
5440 if ((network->ssid_len != match->network->ssid_len) ||
5441 memcmp(network->ssid, match->network->ssid,
5442 network->ssid_len)) {
5443 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5444 "because of non-network ESSID.\n",
5445 escape_essid(network->ssid,
5446 network->ssid_len),
5447 MAC_ARG(network->bssid));
5448 return 0;
5450 } else {
5451 /* If an ESSID has been configured then compare the broadcast
5452 * ESSID to ours */
5453 if ((priv->config & CFG_STATIC_ESSID) &&
5454 ((network->ssid_len != priv->essid_len) ||
5455 memcmp(network->ssid, priv->essid,
5456 min(network->ssid_len, priv->essid_len)))) {
5457 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5458 strncpy(escaped,
5459 escape_essid(network->ssid, network->ssid_len),
5460 sizeof(escaped));
5461 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5462 "because of ESSID mismatch: '%s'.\n",
5463 escaped, MAC_ARG(network->bssid),
5464 escape_essid(priv->essid,
5465 priv->essid_len));
5466 return 0;
5470 /* If the current match's signal is stronger than this network's, don't
5471 * bother testing everything else. */
5472 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5473 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5474 strncpy(escaped,
5475 escape_essid(network->ssid, network->ssid_len),
5476 sizeof(escaped));
5477 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5478 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5479 escaped, MAC_ARG(network->bssid),
5480 escape_essid(match->network->ssid,
5481 match->network->ssid_len),
5482 MAC_ARG(match->network->bssid));
5483 return 0;
5486 /* If this network has already had an association attempt within the
5487 * last 3 seconds, do not try and associate again... */
5488 if (network->last_associate &&
5489 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5490 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5491 "because of storming (%ums since last "
5492 "assoc attempt).\n",
5493 escape_essid(network->ssid, network->ssid_len),
5494 MAC_ARG(network->bssid),
5495 jiffies_to_msecs(jiffies -
5496 network->last_associate));
5497 return 0;
5500 /* Now go through and see if the requested network is valid... */
5501 if (priv->ieee->scan_age != 0 &&
5502 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5503 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5504 "because of age: %ums.\n",
5505 escape_essid(network->ssid, network->ssid_len),
5506 MAC_ARG(network->bssid),
5507 jiffies_to_msecs(jiffies -
5508 network->last_scanned));
5509 return 0;
5512 if ((priv->config & CFG_STATIC_CHANNEL) &&
5513 (network->channel != priv->channel)) {
5514 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5515 "because of channel mismatch: %d != %d.\n",
5516 escape_essid(network->ssid, network->ssid_len),
5517 MAC_ARG(network->bssid),
5518 network->channel, priv->channel);
5519 return 0;
5522 /* Verify privacy compatibility */
5523 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5524 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5525 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5526 "because of privacy mismatch: %s != %s.\n",
5527 escape_essid(network->ssid, network->ssid_len),
5528 MAC_ARG(network->bssid),
5529 priv->capability & CAP_PRIVACY_ON ? "on" :
5530 "off",
5531 network->capability &
5532 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5533 return 0;
5536 if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 ||
5537 network->rsn_ie_len > 0)) {
5538 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5539 "because of WPA capability mismatch.\n",
5540 escape_essid(network->ssid, network->ssid_len),
5541 MAC_ARG(network->bssid));
5542 return 0;
5545 if ((priv->config & CFG_STATIC_BSSID) &&
5546 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5548 "because of BSSID mismatch: " MAC_FMT ".\n",
5549 escape_essid(network->ssid, network->ssid_len),
5550 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5551 return 0;
5554 /* Filter out any incompatible freq / mode combinations */
5555 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5556 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5557 "because of invalid frequency/mode "
5558 "combination.\n",
5559 escape_essid(network->ssid, network->ssid_len),
5560 MAC_ARG(network->bssid));
5561 return 0;
5564 /* Filter out invalid channel in current GEO */
5565 if (!ipw_is_valid_channel(priv->ieee, network->channel)) {
5566 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5567 "because of invalid channel in current GEO\n",
5568 escape_essid(network->ssid, network->ssid_len),
5569 MAC_ARG(network->bssid));
5570 return 0;
5573 /* Ensure that the rates supported by the driver are compatible with
5574 * this AP, including verification of basic rates (mandatory) */
5575 if (!ipw_compatible_rates(priv, network, &rates)) {
5576 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5577 "because configured rate mask excludes "
5578 "AP mandatory rate.\n",
5579 escape_essid(network->ssid, network->ssid_len),
5580 MAC_ARG(network->bssid));
5581 return 0;
5584 if (rates.num_rates == 0) {
5585 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5586 "because of no compatible rates.\n",
5587 escape_essid(network->ssid, network->ssid_len),
5588 MAC_ARG(network->bssid));
5589 return 0;
5592 /* TODO: Perform any further minimal comparative tests. We do not
5593 * want to put too much policy logic here; intelligent scan selection
5594 * should occur within a generic IEEE 802.11 user space tool. */
5596 /* Set up 'new' AP to this network */
5597 ipw_copy_rates(&match->rates, &rates);
5598 match->network = network;
5600 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5601 escape_essid(network->ssid, network->ssid_len),
5602 MAC_ARG(network->bssid));
5604 return 1;
5607 static void ipw_adhoc_create(struct ipw_priv *priv,
5608 struct ieee80211_network *network)
5610 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
5611 int i;
5614 * For the purposes of scanning, we can set our wireless mode
5615 * to trigger scans across combinations of bands, but when it
5616 * comes to creating a new ad-hoc network, we have to tell the FW
5617 * exactly which band to use.
5619 * We also have the possibility of an invalid channel for the
5620 * chosen band. Attempting to create a new ad-hoc network
5621 * with an invalid channel for wireless mode will trigger a
5622 * FW fatal error.
5625 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
5626 case IEEE80211_52GHZ_BAND:
5627 network->mode = IEEE_A;
5628 i = ipw_channel_to_index(priv->ieee, priv->channel);
5629 if (i == -1)
5630 BUG();
5631 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5632 IPW_WARNING("Overriding invalid channel\n");
5633 priv->channel = geo->a[0].channel;
5635 break;
5637 case IEEE80211_24GHZ_BAND:
5638 if (priv->ieee->mode & IEEE_G)
5639 network->mode = IEEE_G;
5640 else
5641 network->mode = IEEE_B;
5642 i = ipw_channel_to_index(priv->ieee, priv->channel);
5643 if (i == -1)
5644 BUG();
5645 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5646 IPW_WARNING("Overriding invalid channel\n");
5647 priv->channel = geo->bg[0].channel;
5649 break;
5651 default:
5652 IPW_WARNING("Overriding invalid channel\n");
5653 if (priv->ieee->mode & IEEE_A) {
5654 network->mode = IEEE_A;
5655 priv->channel = geo->a[0].channel;
5656 } else if (priv->ieee->mode & IEEE_G) {
5657 network->mode = IEEE_G;
5658 priv->channel = geo->bg[0].channel;
5659 } else {
5660 network->mode = IEEE_B;
5661 priv->channel = geo->bg[0].channel;
5663 break;
5666 network->channel = priv->channel;
5667 priv->config |= CFG_ADHOC_PERSIST;
5668 ipw_create_bssid(priv, network->bssid);
5669 network->ssid_len = priv->essid_len;
5670 memcpy(network->ssid, priv->essid, priv->essid_len);
5671 memset(&network->stats, 0, sizeof(network->stats));
5672 network->capability = WLAN_CAPABILITY_IBSS;
5673 if (!(priv->config & CFG_PREAMBLE_LONG))
5674 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5675 if (priv->capability & CAP_PRIVACY_ON)
5676 network->capability |= WLAN_CAPABILITY_PRIVACY;
5677 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5678 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5679 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5680 memcpy(network->rates_ex,
5681 &priv->rates.supported_rates[network->rates_len],
5682 network->rates_ex_len);
5683 network->last_scanned = 0;
5684 network->flags = 0;
5685 network->last_associate = 0;
5686 network->time_stamp[0] = 0;
5687 network->time_stamp[1] = 0;
5688 network->beacon_interval = 100; /* Default */
5689 network->listen_interval = 10; /* Default */
5690 network->atim_window = 0; /* Default */
5691 network->wpa_ie_len = 0;
5692 network->rsn_ie_len = 0;
5695 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5697 struct ipw_tgi_tx_key key;
5699 if (!(priv->ieee->sec.flags & (1 << index)))
5700 return;
5702 key.key_id = index;
5703 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5704 key.security_type = type;
5705 key.station_index = 0; /* always 0 for BSS */
5706 key.flags = 0;
5707 /* 0 for new key; previous value of counter (after fatal error) */
5708 key.tx_counter[0] = 0;
5709 key.tx_counter[1] = 0;
5711 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5714 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5716 struct ipw_wep_key key;
5717 int i;
5719 key.cmd_id = DINO_CMD_WEP_KEY;
5720 key.seq_num = 0;
5722 /* Note: AES keys cannot be set multiple times;
5723 * only set them the first time. */
5724 for (i = 0; i < 4; i++) {
5725 key.key_index = i | type;
5726 if (!(priv->ieee->sec.flags & (1 << i))) {
5727 key.key_size = 0;
5728 continue;
5731 key.key_size = priv->ieee->sec.key_sizes[i];
5732 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5734 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5738 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5740 if (priv->ieee->host_encrypt)
5741 return;
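/* The security levels here presumably map to WEP (1), TKIP (2) and
 * CCMP (3): WEP and CCMP unicast decryption stays in the firmware,
 * while TKIP is handed back to the host. */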
5743 switch (level) {
5744 case SEC_LEVEL_3:
5745 priv->sys_config.disable_unicast_decryption = 0;
5746 priv->ieee->host_decrypt = 0;
5747 break;
5748 case SEC_LEVEL_2:
5749 priv->sys_config.disable_unicast_decryption = 1;
5750 priv->ieee->host_decrypt = 1;
5751 break;
5752 case SEC_LEVEL_1:
5753 priv->sys_config.disable_unicast_decryption = 0;
5754 priv->ieee->host_decrypt = 0;
5755 break;
5756 case SEC_LEVEL_0:
5757 priv->sys_config.disable_unicast_decryption = 1;
5758 break;
5759 default:
5760 break;
5764 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5766 if (priv->ieee->host_encrypt)
5767 return;
5769 switch (level) {
5770 case SEC_LEVEL_3:
5771 priv->sys_config.disable_multicast_decryption = 0;
5772 break;
5773 case SEC_LEVEL_2:
5774 priv->sys_config.disable_multicast_decryption = 1;
5775 break;
5776 case SEC_LEVEL_1:
5777 priv->sys_config.disable_multicast_decryption = 0;
5778 break;
5779 case SEC_LEVEL_0:
5780 priv->sys_config.disable_multicast_decryption = 1;
5781 break;
5782 default:
5783 break;
5787 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5789 switch (priv->ieee->sec.level) {
5790 case SEC_LEVEL_3:
5791 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5792 ipw_send_tgi_tx_key(priv,
5793 DCT_FLAG_EXT_SECURITY_CCM,
5794 priv->ieee->sec.active_key);
5796 if (!priv->ieee->host_mc_decrypt)
5797 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5798 break;
5799 case SEC_LEVEL_2:
5800 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5801 ipw_send_tgi_tx_key(priv,
5802 DCT_FLAG_EXT_SECURITY_TKIP,
5803 priv->ieee->sec.active_key);
5804 break;
5805 case SEC_LEVEL_1:
5806 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5807 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5808 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5809 break;
5810 case SEC_LEVEL_0:
5811 default:
5812 break;
5816 static void ipw_adhoc_check(void *data)
5818 struct ipw_priv *priv = data;
5820 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5821 !(priv->config & CFG_ADHOC_PERSIST)) {
5822 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5823 IPW_DL_STATE | IPW_DL_ASSOC,
5824 "Missed beacon: %d - disassociate\n",
5825 priv->missed_adhoc_beacons);
5826 ipw_remove_current_network(priv);
5827 ipw_disassociate(priv);
5828 return;
5831 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5832 priv->assoc_request.beacon_interval);
5835 static void ipw_bg_adhoc_check(void *data)
5837 struct ipw_priv *priv = data;
5838 mutex_lock(&priv->mutex);
5839 ipw_adhoc_check(data);
5840 mutex_unlock(&priv->mutex);
5843 #ifdef CONFIG_IPW2200_DEBUG
5844 static void ipw_debug_config(struct ipw_priv *priv)
5846 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5847 "[CFG 0x%08X]\n", priv->config);
5848 if (priv->config & CFG_STATIC_CHANNEL)
5849 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5850 else
5851 IPW_DEBUG_INFO("Channel unlocked.\n");
5852 if (priv->config & CFG_STATIC_ESSID)
5853 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5854 escape_essid(priv->essid, priv->essid_len));
5855 else
5856 IPW_DEBUG_INFO("ESSID unlocked.\n");
5857 if (priv->config & CFG_STATIC_BSSID)
5858 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5859 MAC_ARG(priv->bssid));
5860 else
5861 IPW_DEBUG_INFO("BSSID unlocked.\n");
5862 if (priv->capability & CAP_PRIVACY_ON)
5863 IPW_DEBUG_INFO("PRIVACY on\n");
5864 else
5865 IPW_DEBUG_INFO("PRIVACY off\n");
5866 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5868 #else
5869 #define ipw_debug_config(x) do {} while (0)
5870 #endif
5872 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5874 /* TODO: Verify that this works... */
5875 struct ipw_fixed_rate fr = {
5876 .tx_rates = priv->rates_mask
5878 u32 reg;
5879 u16 mask = 0;
5881 /* Identify 'current FW band' and match it with the fixed
5882 * Tx rates */
5884 switch (priv->ieee->freq_band) {
5885 case IEEE80211_52GHZ_BAND: /* A only */
5886 /* IEEE_A */
5887 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5888 /* Invalid fixed rate mask */
5889 IPW_DEBUG_WX
5890 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5891 fr.tx_rates = 0;
5892 break;
5895 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5896 break;
5898 default: /* 2.4Ghz or Mixed */
5899 /* IEEE_B */
5900 if (mode == IEEE_B) {
5901 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5902 /* Invalid fixed rate mask */
5903 IPW_DEBUG_WX
5904 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5905 fr.tx_rates = 0;
5907 break;
5910 /* IEEE_G */
5911 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5912 IEEE80211_OFDM_RATES_MASK)) {
5913 /* Invalid fixed rate mask */
5914 IPW_DEBUG_WX
5915 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5916 fr.tx_rates = 0;
5917 break;
5920 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5921 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5922 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5925 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5926 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5927 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5930 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5931 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5932 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5935 fr.tx_rates |= mask;
5936 break;
5939 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5940 ipw_write_reg32(priv, reg, *(u32 *) & fr);
5943 static void ipw_abort_scan(struct ipw_priv *priv)
5945 int err;
5947 if (priv->status & STATUS_SCAN_ABORTING) {
5948 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5949 return;
5951 priv->status |= STATUS_SCAN_ABORTING;
5953 err = ipw_send_scan_abort(priv);
5954 if (err)
5955 IPW_DEBUG_HC("Request to abort scan failed.\n");
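/* Build the scan channel list.  Each band is emitted as a header byte
 * (band id in the top two bits, channel count in the low bits)
 * followed by the channel numbers; the currently associated channel is
 * skipped and passive-only channels are forced to a passive full dwell
 * scan. */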
5958 static void ipw_add_scan_channels(struct ipw_priv *priv,
5959 struct ipw_scan_request_ext *scan,
5960 int scan_type)
5962 int channel_index = 0;
5963 const struct ieee80211_geo *geo;
5964 int i;
5966 geo = ipw_get_geo(priv->ieee);
5968 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5969 int start = channel_index;
5970 for (i = 0; i < geo->a_channels; i++) {
5971 if ((priv->status & STATUS_ASSOCIATED) &&
5972 geo->a[i].channel == priv->channel)
5973 continue;
5974 channel_index++;
5975 scan->channels_list[channel_index] = geo->a[i].channel;
5976 ipw_set_scan_type(scan, channel_index,
5977 geo->a[i].
5978 flags & IEEE80211_CH_PASSIVE_ONLY ?
5979 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
5980 scan_type);
5983 if (start != channel_index) {
5984 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
5985 (channel_index - start);
5986 channel_index++;
5990 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5991 int start = channel_index;
5992 if (priv->config & CFG_SPEED_SCAN) {
5993 int index;
5994 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
5995 /* nop out the list */
5996 [0] = 0
5999 u8 channel;
6000 while (channel_index < IPW_SCAN_CHANNELS) {
6001 channel =
6002 priv->speed_scan[priv->speed_scan_pos];
6003 if (channel == 0) {
6004 priv->speed_scan_pos = 0;
6005 channel = priv->speed_scan[0];
6007 if ((priv->status & STATUS_ASSOCIATED) &&
6008 channel == priv->channel) {
6009 priv->speed_scan_pos++;
6010 continue;
6013 /* If this channel has already been
6014 * added in scan, break from loop
6015 * and this will be the first channel
6016 * in the next scan.
6018 if (channels[channel - 1] != 0)
6019 break;
6021 channels[channel - 1] = 1;
6022 priv->speed_scan_pos++;
6023 channel_index++;
6024 scan->channels_list[channel_index] = channel;
6025 index =
6026 ipw_channel_to_index(priv->ieee, channel);
6027 ipw_set_scan_type(scan, channel_index,
6028 geo->bg[index].
6029 flags &
6030 IEEE80211_CH_PASSIVE_ONLY ?
6031 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6032 : scan_type);
6034 } else {
6035 for (i = 0; i < geo->bg_channels; i++) {
6036 if ((priv->status & STATUS_ASSOCIATED) &&
6037 geo->bg[i].channel == priv->channel)
6038 continue;
6039 channel_index++;
6040 scan->channels_list[channel_index] =
6041 geo->bg[i].channel;
6042 ipw_set_scan_type(scan, channel_index,
6043 geo->bg[i].
6044 flags &
6045 IEEE80211_CH_PASSIVE_ONLY ?
6046 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6047 : scan_type);
6051 if (start != channel_index) {
6052 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6053 (channel_index - start);
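/* Build and send a scan request to the firmware.  If a scan is already
 * running, an abort is pending or RF kill is active, the request is
 * deferred by setting STATUS_SCAN_PENDING instead. */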
6058 static int ipw_request_scan(struct ipw_priv *priv)
6060 struct ipw_scan_request_ext scan;
6061 int err = 0, scan_type;
6063 if (!(priv->status & STATUS_INIT) ||
6064 (priv->status & STATUS_EXIT_PENDING))
6065 return 0;
6067 mutex_lock(&priv->mutex);
6069 if (priv->status & STATUS_SCANNING) {
6070 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6071 priv->status |= STATUS_SCAN_PENDING;
6072 goto done;
6075 if (!(priv->status & STATUS_SCAN_FORCED) &&
6076 priv->status & STATUS_SCAN_ABORTING) {
6077 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6078 priv->status |= STATUS_SCAN_PENDING;
6079 goto done;
6082 if (priv->status & STATUS_RF_KILL_MASK) {
6083 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6084 priv->status |= STATUS_SCAN_PENDING;
6085 goto done;
6088 memset(&scan, 0, sizeof(scan));
6090 if (priv->config & CFG_SPEED_SCAN)
6091 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6092 cpu_to_le16(30);
6093 else
6094 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6095 cpu_to_le16(20);
6097 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6098 cpu_to_le16(20);
6099 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6101 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6103 #ifdef CONFIG_IPW2200_MONITOR
6104 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6105 u8 channel;
6106 u8 band = 0;
6108 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
6109 case IEEE80211_52GHZ_BAND:
6110 band = (u8) (IPW_A_MODE << 6) | 1;
6111 channel = priv->channel;
6112 break;
6114 case IEEE80211_24GHZ_BAND:
6115 band = (u8) (IPW_B_MODE << 6) | 1;
6116 channel = priv->channel;
6117 break;
6119 default:
6120 band = (u8) (IPW_B_MODE << 6) | 1;
6121 channel = 9;
6122 break;
6125 scan.channels_list[0] = band;
6126 scan.channels_list[1] = channel;
6127 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6129 /* NOTE: The card will sit on this channel for this time
6130 * period. Scan aborts are timing sensitive and frequently
6131 * result in firmware restarts. As such, it is best to
6132 * set a small dwell_time here and just keep re-issuing
6133 * scans. Otherwise fast channel hopping will not actually
6134 * hop channels.
6136 * TODO: Move SPEED SCAN support to all modes and bands */
6137 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6138 cpu_to_le16(2000);
6139 } else {
6140 #endif /* CONFIG_IPW2200_MONITOR */
6141 /* If we are roaming, then make this a directed scan for the
6142 * current network. Otherwise, ensure that every other scan
6143 * is a fast channel hop scan */
6144 if ((priv->status & STATUS_ROAMING)
6145 || (!(priv->status & STATUS_ASSOCIATED)
6146 && (priv->config & CFG_STATIC_ESSID)
6147 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6148 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6149 if (err) {
6150 IPW_DEBUG_HC("Attempt to send SSID command "
6151 "failed.\n");
6152 goto done;
6155 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6156 } else
6157 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6159 ipw_add_scan_channels(priv, &scan, scan_type);
6160 #ifdef CONFIG_IPW2200_MONITOR
6162 #endif
6164 err = ipw_send_scan_request_ext(priv, &scan);
6165 if (err) {
6166 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6167 goto done;
6170 priv->status |= STATUS_SCANNING;
6171 priv->status &= ~STATUS_SCAN_PENDING;
6172 queue_delayed_work(priv->workqueue, &priv->scan_check,
6173 IPW_SCAN_CHECK_WATCHDOG);
6174 done:
6175 mutex_unlock(&priv->mutex);
6176 return err;
6179 static void ipw_bg_abort_scan(void *data)
6181 struct ipw_priv *priv = data;
6182 mutex_lock(&priv->mutex);
6183 ipw_abort_scan(data);
6184 mutex_unlock(&priv->mutex);
6187 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6189 /* This is called when wpa_supplicant loads and closes the driver
6190 * interface. */
6191 priv->ieee->wpa_enabled = value;
6192 return 0;
6195 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6197 struct ieee80211_device *ieee = priv->ieee;
6198 struct ieee80211_security sec = {
6199 .flags = SEC_AUTH_MODE,
6201 int ret = 0;
6203 if (value & IW_AUTH_ALG_SHARED_KEY) {
6204 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6205 ieee->open_wep = 0;
6206 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6207 sec.auth_mode = WLAN_AUTH_OPEN;
6208 ieee->open_wep = 1;
6209 } else if (value & IW_AUTH_ALG_LEAP) {
6210 sec.auth_mode = WLAN_AUTH_LEAP;
6211 ieee->open_wep = 1;
6212 } else
6213 return -EINVAL;
6215 if (ieee->set_security)
6216 ieee->set_security(ieee->dev, &sec);
6217 else
6218 ret = -EOPNOTSUPP;
6220 return ret;
6223 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6224 int wpa_ie_len)
6226 /* make sure WPA is enabled */
6227 ipw_wpa_enable(priv, 1);
6229 ipw_disassociate(priv);
6232 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6233 char *capabilities, int length)
6235 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6237 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6238 capabilities);
6242 * WE-18 support
6245 /* SIOCSIWGENIE */
6246 static int ipw_wx_set_genie(struct net_device *dev,
6247 struct iw_request_info *info,
6248 union iwreq_data *wrqu, char *extra)
6250 struct ipw_priv *priv = ieee80211_priv(dev);
6251 struct ieee80211_device *ieee = priv->ieee;
6252 u8 *buf;
6253 int err = 0;
6255 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6256 (wrqu->data.length && extra == NULL))
6257 return -EINVAL;
6259 //mutex_lock(&priv->mutex);
6261 //if (!ieee->wpa_enabled) {
6262 // err = -EOPNOTSUPP;
6263 // goto out;
6266 if (wrqu->data.length) {
6267 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6268 if (buf == NULL) {
6269 err = -ENOMEM;
6270 goto out;
6273 memcpy(buf, extra, wrqu->data.length);
6274 kfree(ieee->wpa_ie);
6275 ieee->wpa_ie = buf;
6276 ieee->wpa_ie_len = wrqu->data.length;
6277 } else {
6278 kfree(ieee->wpa_ie);
6279 ieee->wpa_ie = NULL;
6280 ieee->wpa_ie_len = 0;
6283 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6284 out:
6285 //mutex_unlock(&priv->mutex);
6286 return err;
6289 /* SIOCGIWGENIE */
6290 static int ipw_wx_get_genie(struct net_device *dev,
6291 struct iw_request_info *info,
6292 union iwreq_data *wrqu, char *extra)
6294 struct ipw_priv *priv = ieee80211_priv(dev);
6295 struct ieee80211_device *ieee = priv->ieee;
6296 int err = 0;
6298 //mutex_lock(&priv->mutex);
6300 //if (!ieee->wpa_enabled) {
6301 // err = -EOPNOTSUPP;
6302 // goto out;
6305 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6306 wrqu->data.length = 0;
6307 goto out;
6310 if (wrqu->data.length < ieee->wpa_ie_len) {
6311 err = -E2BIG;
6312 goto out;
6315 wrqu->data.length = ieee->wpa_ie_len;
6316 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6318 out:
6319 //mutex_unlock(&priv->mutex);
6320 return err;
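/* Map a WEXT cipher identifier onto the driver's security level
 * (0 = none, 1 = WEP, 2 = TKIP, 3 = CCMP). */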
6323 static int wext_cipher2level(int cipher)
6325 switch (cipher) {
6326 case IW_AUTH_CIPHER_NONE:
6327 return SEC_LEVEL_0;
6328 case IW_AUTH_CIPHER_WEP40:
6329 case IW_AUTH_CIPHER_WEP104:
6330 return SEC_LEVEL_1;
6331 case IW_AUTH_CIPHER_TKIP:
6332 return SEC_LEVEL_2;
6333 case IW_AUTH_CIPHER_CCMP:
6334 return SEC_LEVEL_3;
6335 default:
6336 return -1;
6340 /* SIOCSIWAUTH */
6341 static int ipw_wx_set_auth(struct net_device *dev,
6342 struct iw_request_info *info,
6343 union iwreq_data *wrqu, char *extra)
6345 struct ipw_priv *priv = ieee80211_priv(dev);
6346 struct ieee80211_device *ieee = priv->ieee;
6347 struct iw_param *param = &wrqu->param;
6348 struct ieee80211_crypt_data *crypt;
6349 unsigned long flags;
6350 int ret = 0;
6352 switch (param->flags & IW_AUTH_INDEX) {
6353 case IW_AUTH_WPA_VERSION:
6354 break;
6355 case IW_AUTH_CIPHER_PAIRWISE:
6356 ipw_set_hw_decrypt_unicast(priv,
6357 wext_cipher2level(param->value));
6358 break;
6359 case IW_AUTH_CIPHER_GROUP:
6360 ipw_set_hw_decrypt_multicast(priv,
6361 wext_cipher2level(param->value));
6362 break;
6363 case IW_AUTH_KEY_MGMT:
6365 * ipw2200 does not use these parameters
6367 break;
6369 case IW_AUTH_TKIP_COUNTERMEASURES:
6370 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6371 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6372 break;
6374 flags = crypt->ops->get_flags(crypt->priv);
6376 if (param->value)
6377 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6378 else
6379 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6381 crypt->ops->set_flags(flags, crypt->priv);
6383 break;
6385 case IW_AUTH_DROP_UNENCRYPTED:{
6386 /* HACK:
6388 * wpa_supplicant calls set_wpa_enabled when the driver
6389 * is loaded and unloaded, regardless of whether WPA is being
6390 * used. No other call made before an association is expected
6391 * tells us whether encryption will be used. However,
6392 * drop_unencrypted is set to false when encryption is not in
6393 * use and to true when it is -- so we use it to decide whether
6394 * the CAP_PRIVACY_ON bit should
6395 * be set.
6397 struct ieee80211_security sec = {
6398 .flags = SEC_ENABLED,
6399 .enabled = param->value,
6401 priv->ieee->drop_unencrypted = param->value;
6402 /* We only change SEC_LEVEL for open mode. Others
6403 * are set by ipw_wpa_set_encryption.
6405 if (!param->value) {
6406 sec.flags |= SEC_LEVEL;
6407 sec.level = SEC_LEVEL_0;
6408 } else {
6409 sec.flags |= SEC_LEVEL;
6410 sec.level = SEC_LEVEL_1;
6412 if (priv->ieee->set_security)
6413 priv->ieee->set_security(priv->ieee->dev, &sec);
6414 break;
6417 case IW_AUTH_80211_AUTH_ALG:
6418 ret = ipw_wpa_set_auth_algs(priv, param->value);
6419 break;
6421 case IW_AUTH_WPA_ENABLED:
6422 ret = ipw_wpa_enable(priv, param->value);
6423 break;
6425 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6426 ieee->ieee802_1x = param->value;
6427 break;
6429 //case IW_AUTH_ROAMING_CONTROL:
6430 case IW_AUTH_PRIVACY_INVOKED:
6431 ieee->privacy_invoked = param->value;
6432 break;
6434 default:
6435 return -EOPNOTSUPP;
6437 return ret;
6440 /* SIOCGIWAUTH */
6441 static int ipw_wx_get_auth(struct net_device *dev,
6442 struct iw_request_info *info,
6443 union iwreq_data *wrqu, char *extra)
6445 struct ipw_priv *priv = ieee80211_priv(dev);
6446 struct ieee80211_device *ieee = priv->ieee;
6447 struct ieee80211_crypt_data *crypt;
6448 struct iw_param *param = &wrqu->param;
6449 int ret = 0;
6451 switch (param->flags & IW_AUTH_INDEX) {
6452 case IW_AUTH_WPA_VERSION:
6453 case IW_AUTH_CIPHER_PAIRWISE:
6454 case IW_AUTH_CIPHER_GROUP:
6455 case IW_AUTH_KEY_MGMT:
6457 * wpa_supplicant will control these internally
6459 ret = -EOPNOTSUPP;
6460 break;
6462 case IW_AUTH_TKIP_COUNTERMEASURES:
6463 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6464 if (!crypt || !crypt->ops->get_flags)
6465 break;
6467 param->value = (crypt->ops->get_flags(crypt->priv) &
6468 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6470 break;
6472 case IW_AUTH_DROP_UNENCRYPTED:
6473 param->value = ieee->drop_unencrypted;
6474 break;
6476 case IW_AUTH_80211_AUTH_ALG:
6477 param->value = ieee->sec.auth_mode;
6478 break;
6480 case IW_AUTH_WPA_ENABLED:
6481 param->value = ieee->wpa_enabled;
6482 break;
6484 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6485 param->value = ieee->ieee802_1x;
6486 break;
6488 case IW_AUTH_ROAMING_CONTROL:
6489 case IW_AUTH_PRIVACY_INVOKED:
6490 param->value = ieee->privacy_invoked;
6491 break;
6493 default:
6494 return -EOPNOTSUPP;
6496 return 0;
6499 /* SIOCSIWENCODEEXT */
6500 static int ipw_wx_set_encodeext(struct net_device *dev,
6501 struct iw_request_info *info,
6502 union iwreq_data *wrqu, char *extra)
6504 struct ipw_priv *priv = ieee80211_priv(dev);
6505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6507 if (hwcrypto) {
6508 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6509 /* IPW HW can't build TKIP MIC,
6510 host decryption still needed */
6511 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6512 priv->ieee->host_mc_decrypt = 1;
6513 else {
6514 priv->ieee->host_encrypt = 0;
6515 priv->ieee->host_encrypt_msdu = 1;
6516 priv->ieee->host_decrypt = 1;
6518 } else {
6519 priv->ieee->host_encrypt = 0;
6520 priv->ieee->host_encrypt_msdu = 0;
6521 priv->ieee->host_decrypt = 0;
6522 priv->ieee->host_mc_decrypt = 0;
6526 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6529 /* SIOCGIWENCODEEXT */
6530 static int ipw_wx_get_encodeext(struct net_device *dev,
6531 struct iw_request_info *info,
6532 union iwreq_data *wrqu, char *extra)
6534 struct ipw_priv *priv = ieee80211_priv(dev);
6535 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6538 /* SIOCSIWMLME */
6539 static int ipw_wx_set_mlme(struct net_device *dev,
6540 struct iw_request_info *info,
6541 union iwreq_data *wrqu, char *extra)
6543 struct ipw_priv *priv = ieee80211_priv(dev);
6544 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6545 u16 reason;
6547 reason = cpu_to_le16(mlme->reason_code);
6549 switch (mlme->cmd) {
6550 case IW_MLME_DEAUTH:
6551 // silently ignore
6552 break;
6554 case IW_MLME_DISASSOC:
6555 ipw_disassociate(priv);
6556 break;
6558 default:
6559 return -EOPNOTSUPP;
6561 return 0;
6564 #ifdef CONFIG_IPW_QOS
6566 /* QoS */
6568 * Get the mode (modulation type) of the current network or
6569 * the card's current mode.
6571 u8 ipw_qos_current_mode(struct ipw_priv * priv)
6573 u8 mode = 0;
6575 if (priv->status & STATUS_ASSOCIATED) {
6576 unsigned long flags;
6578 spin_lock_irqsave(&priv->ieee->lock, flags);
6579 mode = priv->assoc_network->mode;
6580 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6581 } else {
6582 mode = priv->ieee->mode;
6584 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6585 return mode;
6589 * Handle beacon and probe response management frames.
6591 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6592 int active_network,
6593 struct ieee80211_network *network)
6595 u32 size = sizeof(struct ieee80211_qos_parameters);
6597 if (network->capability & WLAN_CAPABILITY_IBSS)
6598 network->qos_data.active = network->qos_data.supported;
6600 if (network->flags & NETWORK_HAS_QOS_MASK) {
6601 if (active_network &&
6602 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6603 network->qos_data.active = network->qos_data.supported;
6605 if ((network->qos_data.active == 1) && (active_network == 1) &&
6606 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6607 (network->qos_data.old_param_count !=
6608 network->qos_data.param_count)) {
6609 network->qos_data.old_param_count =
6610 network->qos_data.param_count;
6611 schedule_work(&priv->qos_activate);
6612 IPW_DEBUG_QOS("QoS parameters change call "
6613 "qos_activate\n");
6615 } else {
6616 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6617 memcpy(&network->qos_data.parameters,
6618 &def_parameters_CCK, size);
6619 else
6620 memcpy(&network->qos_data.parameters,
6621 &def_parameters_OFDM, size);
6623 if ((network->qos_data.active == 1) && (active_network == 1)) {
6624 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6625 schedule_work(&priv->qos_activate);
6628 network->qos_data.active = 0;
6629 network->qos_data.supported = 0;
6631 if ((priv->status & STATUS_ASSOCIATED) &&
6632 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6633 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6634 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6635 !(network->flags & NETWORK_EMPTY_ESSID))
6636 if ((network->ssid_len ==
6637 priv->assoc_network->ssid_len) &&
6638 !memcmp(network->ssid,
6639 priv->assoc_network->ssid,
6640 network->ssid_len)) {
6641 queue_work(priv->workqueue,
6642 &priv->merge_networks);
6646 return 0;
6650 * This function sets up the firmware to support QoS. It sends
6651 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO.
6653 static int ipw_qos_activate(struct ipw_priv *priv,
6654 struct ieee80211_qos_data *qos_network_data)
6656 int err;
6657 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6658 struct ieee80211_qos_parameters *active_one = NULL;
6659 u32 size = sizeof(struct ieee80211_qos_parameters);
6660 u32 burst_duration;
6661 int i;
6662 u8 type;
6664 type = ipw_qos_current_mode(priv);
6666 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6667 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6668 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6669 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6671 if (qos_network_data == NULL) {
6672 if (type == IEEE_B) {
6673 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6674 active_one = &def_parameters_CCK;
6675 } else
6676 active_one = &def_parameters_OFDM;
6678 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6679 burst_duration = ipw_qos_get_burst_duration(priv);
6680 for (i = 0; i < QOS_QUEUE_NUM; i++)
6681 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6682 (u16) burst_duration;
6683 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6684 if (type == IEEE_B) {
6685 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6686 type);
6687 if (priv->qos_data.qos_enable == 0)
6688 active_one = &def_parameters_CCK;
6689 else
6690 active_one = priv->qos_data.def_qos_parm_CCK;
6691 } else {
6692 if (priv->qos_data.qos_enable == 0)
6693 active_one = &def_parameters_OFDM;
6694 else
6695 active_one = priv->qos_data.def_qos_parm_OFDM;
6697 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6698 } else {
6699 unsigned long flags;
6700 int active;
6702 spin_lock_irqsave(&priv->ieee->lock, flags);
6703 active_one = &(qos_network_data->parameters);
6704 qos_network_data->old_param_count =
6705 qos_network_data->param_count;
6706 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6707 active = qos_network_data->supported;
6708 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6710 if (active == 0) {
6711 burst_duration = ipw_qos_get_burst_duration(priv);
6712 for (i = 0; i < QOS_QUEUE_NUM; i++)
6713 qos_parameters[QOS_PARAM_SET_ACTIVE].
6714 tx_op_limit[i] = (u16) burst_duration;
6718 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6719 err = ipw_send_qos_params_command(priv,
6720 (struct ieee80211_qos_parameters *)
6721 &(qos_parameters[0]));
6722 if (err)
6723 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6725 return err;
6729 * send IPW_CMD_WME_INFO to the firmware
6731 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6733 int ret = 0;
6734 struct ieee80211_qos_information_element qos_info;
6736 if (priv == NULL)
6737 return -1;
6739 qos_info.elementID = QOS_ELEMENT_ID;
6740 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6742 qos_info.version = QOS_VERSION_1;
6743 qos_info.ac_info = 0;
6745 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6746 qos_info.qui_type = QOS_OUI_TYPE;
6747 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6749 ret = ipw_send_qos_info_command(priv, &qos_info);
6750 if (ret != 0) {
6751 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6753 return ret;
6757 * Set the QoS parameters for the association request.
6759 static int ipw_qos_association(struct ipw_priv *priv,
6760 struct ieee80211_network *network)
6762 int err = 0;
6763 struct ieee80211_qos_data *qos_data = NULL;
6764 struct ieee80211_qos_data ibss_data = {
6765 .supported = 1,
6766 .active = 1,
6769 switch (priv->ieee->iw_mode) {
6770 case IW_MODE_ADHOC:
6771 if (!(network->capability & WLAN_CAPABILITY_IBSS))
6772 BUG();
6774 qos_data = &ibss_data;
6775 break;
6777 case IW_MODE_INFRA:
6778 qos_data = &network->qos_data;
6779 break;
6781 default:
6782 BUG();
6783 break;
6786 err = ipw_qos_activate(priv, qos_data);
6787 if (err) {
6788 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6789 return err;
6792 if (priv->qos_data.qos_enable && qos_data->supported) {
6793 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6794 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6795 return ipw_qos_set_info_element(priv);
6798 return 0;
6802 * Handle beacon responses. If the QoS setting of the network
6803 * differs from the setting used at association, adjust the QoS
6804 * setting.
6806 static int ipw_qos_association_resp(struct ipw_priv *priv,
6807 struct ieee80211_network *network)
6809 int ret = 0;
6810 unsigned long flags;
6811 u32 size = sizeof(struct ieee80211_qos_parameters);
6812 int set_qos_param = 0;
6814 if ((priv == NULL) || (network == NULL) ||
6815 (priv->assoc_network == NULL))
6816 return ret;
6818 if (!(priv->status & STATUS_ASSOCIATED))
6819 return ret;
6821 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6822 return ret;
6824 spin_lock_irqsave(&priv->ieee->lock, flags);
6825 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6826 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6827 sizeof(struct ieee80211_qos_data));
6828 priv->assoc_network->qos_data.active = 1;
6829 if ((network->qos_data.old_param_count !=
6830 network->qos_data.param_count)) {
6831 set_qos_param = 1;
6832 network->qos_data.old_param_count =
6833 network->qos_data.param_count;
6836 } else {
6837 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6838 memcpy(&priv->assoc_network->qos_data.parameters,
6839 &def_parameters_CCK, size);
6840 else
6841 memcpy(&priv->assoc_network->qos_data.parameters,
6842 &def_parameters_OFDM, size);
6843 priv->assoc_network->qos_data.active = 0;
6844 priv->assoc_network->qos_data.supported = 0;
6845 set_qos_param = 1;
6848 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6850 if (set_qos_param == 1)
6851 schedule_work(&priv->qos_activate);
6853 return ret;
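/* Return the configured burst duration for the modulation currently in
 * use (CCK vs. OFDM). */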
6856 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6858 u32 ret = 0;
6860 if ((priv == NULL))
6861 return 0;
6863 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6864 ret = priv->qos_data.burst_duration_CCK;
6865 else
6866 ret = priv->qos_data.burst_duration_OFDM;
6868 return ret;
6872 * Initialize the global QoS settings.
6874 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6875 int burst_enable, u32 burst_duration_CCK,
6876 u32 burst_duration_OFDM)
6878 priv->qos_data.qos_enable = enable;
6880 if (priv->qos_data.qos_enable) {
6881 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6882 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6883 IPW_DEBUG_QOS("QoS is enabled\n");
6884 } else {
6885 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6886 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6887 IPW_DEBUG_QOS("QoS is not enabled\n");
6890 priv->qos_data.burst_enable = burst_enable;
6892 if (burst_enable) {
6893 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6894 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6895 } else {
6896 priv->qos_data.burst_duration_CCK = 0;
6897 priv->qos_data.burst_duration_OFDM = 0;
6902 * map the packet priority to the right TX Queue
6904 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6906 if (priority > 7 || !priv->qos_data.qos_enable)
6907 priority = 0;
6909 return from_priority_to_tx_queue[priority] - 1;
6913 * Add the QoS parameters to the TX command.
6915 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6916 u16 priority,
6917 struct tfd_data *tfd, u8 unicast)
6919 int ret = 0;
6920 int tx_queue_id = 0;
6921 struct ieee80211_qos_data *qos_data = NULL;
6922 int active, supported;
6923 unsigned long flags;
6925 if (!(priv->status & STATUS_ASSOCIATED))
6926 return 0;
6928 qos_data = &priv->assoc_network->qos_data;
6930 spin_lock_irqsave(&priv->ieee->lock, flags);
6932 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6933 if (unicast == 0)
6934 qos_data->active = 0;
6935 else
6936 qos_data->active = qos_data->supported;
6939 active = qos_data->active;
6940 supported = qos_data->supported;
6942 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6944 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6945 "unicast %d\n",
6946 priv->qos_data.qos_enable, active, supported, unicast);
6947 if (active && priv->qos_data.qos_enable) {
6948 ret = from_priority_to_tx_queue[priority];
6949 tx_queue_id = ret - 1;
6950 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
6951 if (priority <= 7) {
6952 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6953 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
6954 tfd->tfd.tfd_26.mchdr.frame_ctl |=
6955 IEEE80211_STYPE_QOS_DATA;
6957 if (priv->qos_data.qos_no_ack_mask &
6958 (1UL << tx_queue_id)) {
6959 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6960 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
6961 CTRL_QOS_NO_ACK;
6966 return ret;
6970 * Background work handler to run the QoS activate functionality.
6972 static void ipw_bg_qos_activate(void *data)
6974 struct ipw_priv *priv = data;
6976 if (priv == NULL)
6977 return;
6979 mutex_lock(&priv->mutex);
6981 if (priv->status & STATUS_ASSOCIATED)
6982 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6984 mutex_unlock(&priv->mutex);
6987 static int ipw_handle_probe_response(struct net_device *dev,
6988 struct ieee80211_probe_response *resp,
6989 struct ieee80211_network *network)
6991 struct ipw_priv *priv = ieee80211_priv(dev);
6992 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6993 (network == priv->assoc_network));
6995 ipw_qos_handle_probe_response(priv, active_network, network);
6997 return 0;
7000 static int ipw_handle_beacon(struct net_device *dev,
7001 struct ieee80211_beacon *resp,
7002 struct ieee80211_network *network)
7004 struct ipw_priv *priv = ieee80211_priv(dev);
7005 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7006 (network == priv->assoc_network));
7008 ipw_qos_handle_probe_response(priv, active_network, network);
7010 return 0;
7013 static int ipw_handle_assoc_response(struct net_device *dev,
7014 struct ieee80211_assoc_response *resp,
7015 struct ieee80211_network *network)
7017 struct ipw_priv *priv = ieee80211_priv(dev);
7018 ipw_qos_association_resp(priv, network);
7019 return 0;
7022 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7023 *qos_param)
7025 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7026 sizeof(*qos_param) * 3, qos_param);
7029 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7030 *qos_param)
7032 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7033 qos_param);
7036 #endif /* CONFIG_IPW_QOS */
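/* Build priv->assoc_request for the chosen network, push the SSID,
 * supported rates, system config and sensitivity to the firmware, and
 * finally issue the associate (or reassociate) command. */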
7038 static int ipw_associate_network(struct ipw_priv *priv,
7039 struct ieee80211_network *network,
7040 struct ipw_supported_rates *rates, int roaming)
7042 int err;
7044 if (priv->config & CFG_FIXED_RATE)
7045 ipw_set_fixed_rate(priv, network->mode);
7047 if (!(priv->config & CFG_STATIC_ESSID)) {
7048 priv->essid_len = min(network->ssid_len,
7049 (u8) IW_ESSID_MAX_SIZE);
7050 memcpy(priv->essid, network->ssid, priv->essid_len);
7053 network->last_associate = jiffies;
7055 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7056 priv->assoc_request.channel = network->channel;
7057 priv->assoc_request.auth_key = 0;
7059 if ((priv->capability & CAP_PRIVACY_ON) &&
7060 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7061 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7062 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7064 if ((priv->ieee->sec.level == SEC_LEVEL_1) &&
7065 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
7066 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7068 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7069 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7070 priv->assoc_request.auth_type = AUTH_LEAP;
7071 else
7072 priv->assoc_request.auth_type = AUTH_OPEN;
7074 if (priv->ieee->wpa_ie_len) {
7075 priv->assoc_request.policy_support = 0x02; /* RSN active */
7076 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7077 priv->ieee->wpa_ie_len);
7081 * It is valid for our ieee device to support multiple modes, but
7082 * when it comes to associating to a given network we have to choose
7083 * just one mode.
7085 if (network->mode & priv->ieee->mode & IEEE_A)
7086 priv->assoc_request.ieee_mode = IPW_A_MODE;
7087 else if (network->mode & priv->ieee->mode & IEEE_G)
7088 priv->assoc_request.ieee_mode = IPW_G_MODE;
7089 else if (network->mode & priv->ieee->mode & IEEE_B)
7090 priv->assoc_request.ieee_mode = IPW_B_MODE;
7092 priv->assoc_request.capability = network->capability;
7093 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7094 && !(priv->config & CFG_PREAMBLE_LONG)) {
7095 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7096 } else {
7097 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7099 /* Clear the short preamble if we won't be supporting it */
7100 priv->assoc_request.capability &=
7101 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7104 /* Clear capability bits that aren't used in Ad Hoc */
7105 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7106 priv->assoc_request.capability &=
7107 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7109 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7110 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7111 roaming ? "Rea" : "A",
7112 escape_essid(priv->essid, priv->essid_len),
7113 network->channel,
7114 ipw_modes[priv->assoc_request.ieee_mode],
7115 rates->num_rates,
7116 (priv->assoc_request.preamble_length ==
7117 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7118 network->capability &
7119 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7120 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7121 priv->capability & CAP_PRIVACY_ON ?
7122 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7123 "(open)") : "",
7124 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7125 priv->capability & CAP_PRIVACY_ON ?
7126 '1' + priv->ieee->sec.active_key : '.',
7127 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7129 priv->assoc_request.beacon_interval = network->beacon_interval;
7130 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7131 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7132 priv->assoc_request.assoc_type = HC_IBSS_START;
7133 priv->assoc_request.assoc_tsf_msw = 0;
7134 priv->assoc_request.assoc_tsf_lsw = 0;
7135 } else {
7136 if (unlikely(roaming))
7137 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7138 else
7139 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7140 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7141 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7144 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7146 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7147 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7148 priv->assoc_request.atim_window = network->atim_window;
7149 } else {
7150 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7151 priv->assoc_request.atim_window = 0;
7154 priv->assoc_request.listen_interval = network->listen_interval;
7156 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7157 if (err) {
7158 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7159 return err;
7162 rates->ieee_mode = priv->assoc_request.ieee_mode;
7163 rates->purpose = IPW_RATE_CONNECT;
7164 ipw_send_supported_rates(priv, rates);
7166 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7167 priv->sys_config.dot11g_auto_detection = 1;
7168 else
7169 priv->sys_config.dot11g_auto_detection = 0;
7171 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7172 priv->sys_config.answer_broadcast_ssid_probe = 1;
7173 else
7174 priv->sys_config.answer_broadcast_ssid_probe = 0;
7176 err = ipw_send_system_config(priv, &priv->sys_config);
7177 if (err) {
7178 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7179 return err;
7182 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7183 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7184 if (err) {
7185 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7186 return err;
7190 * If preemption is enabled, it is possible for the association
7191 * to complete before we return from ipw_send_associate. Therefore
7192 * we have to be sure to update our private data first.
7194 priv->channel = network->channel;
7195 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7196 priv->status |= STATUS_ASSOCIATING;
7197 priv->status &= ~STATUS_SECURITY_UPDATED;
7199 priv->assoc_network = network;
7201 #ifdef CONFIG_IPW_QOS
7202 ipw_qos_association(priv, network);
7203 #endif
7205 err = ipw_send_associate(priv, &priv->assoc_request);
7206 if (err) {
7207 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7208 return err;
7211 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7212 escape_essid(priv->essid, priv->essid_len),
7213 MAC_ARG(priv->bssid));
7215 return 0;
7218 static void ipw_roam(void *data)
7220 struct ipw_priv *priv = data;
7221 struct ieee80211_network *network = NULL;
7222 struct ipw_network_match match = {
7223 .network = priv->assoc_network
7226 /* The roaming process is as follows:
7228 * 1. Missed beacon threshold triggers the roaming process by
7229 * setting the status ROAM bit and requesting a scan.
7230 * 2. When the scan completes, it schedules the ROAM work
7231 * 3. The ROAM work looks at all of the known networks for one that
7232 * is a better network than the currently associated. If none
7233 * found, the ROAM process is over (ROAM bit cleared)
7234 * 4. If a better network is found, a disassociation request is
7235 * sent.
7236 * 5. When the disassociation completes, the roam work is again
7237 * scheduled. The second time through, the driver is no longer
7238 * associated, and the newly selected network is sent an
7239 * association request.
7240 * 6. At this point, the roaming process is complete and the ROAM
7241 * status bit is cleared.
7244 /* If we are no longer associated, and the roaming bit is no longer
7245 * set, then we are not actively roaming, so just return */
7246 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7247 return;
7249 if (priv->status & STATUS_ASSOCIATED) {
7250 /* First pass through ROAM process -- look for a better
7251 * network */
7252 unsigned long flags;
7253 u8 rssi = priv->assoc_network->stats.rssi;
7254 priv->assoc_network->stats.rssi = -128;
7255 spin_lock_irqsave(&priv->ieee->lock, flags);
7256 list_for_each_entry(network, &priv->ieee->network_list, list) {
7257 if (network != priv->assoc_network)
7258 ipw_best_network(priv, &match, network, 1);
7260 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7261 priv->assoc_network->stats.rssi = rssi;
7263 if (match.network == priv->assoc_network) {
7264 IPW_DEBUG_ASSOC("No better APs in this network to "
7265 "roam to.\n");
7266 priv->status &= ~STATUS_ROAMING;
7267 ipw_debug_config(priv);
7268 return;
7271 ipw_send_disassociate(priv, 1);
7272 priv->assoc_network = match.network;
7274 return;
7277 /* Second pass through ROAM process -- request association */
7278 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7279 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7280 priv->status &= ~STATUS_ROAMING;
7283 static void ipw_bg_roam(void *data)
7285 struct ipw_priv *priv = data;
7286 mutex_lock(&priv->mutex);
7287 ipw_roam(data);
7288 mutex_unlock(&priv->mutex);
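/* Pick the best matching network from the scan results (or create an
 * ad-hoc network when so configured) and start an association attempt.
 * Returns 1 if an attempt was started, 0 otherwise. */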
7291 static int ipw_associate(void *data)
7293 struct ipw_priv *priv = data;
7295 struct ieee80211_network *network = NULL;
7296 struct ipw_network_match match = {
7297 .network = NULL
7299 struct ipw_supported_rates *rates;
7300 struct list_head *element;
7301 unsigned long flags;
7303 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7304 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7305 return 0;
7308 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7309 IPW_DEBUG_ASSOC("Not attempting association (already in "
7310 "progress)\n");
7311 return 0;
7314 if (priv->status & STATUS_DISASSOCIATING) {
7315 IPW_DEBUG_ASSOC("Not attempting association (in "
7316 "disassociating)\n ");
7317 queue_work(priv->workqueue, &priv->associate);
7318 return 0;
7321 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7322 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7323 "initialized)\n");
7324 return 0;
7327 if (!(priv->config & CFG_ASSOCIATE) &&
7328 !(priv->config & (CFG_STATIC_ESSID |
7329 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7330 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7331 return 0;
7334 /* Protect our use of the network_list */
7335 spin_lock_irqsave(&priv->ieee->lock, flags);
7336 list_for_each_entry(network, &priv->ieee->network_list, list)
7337 ipw_best_network(priv, &match, network, 0);
7339 network = match.network;
7340 rates = &match.rates;
7342 if (network == NULL &&
7343 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7344 priv->config & CFG_ADHOC_CREATE &&
7345 priv->config & CFG_STATIC_ESSID &&
7346 priv->config & CFG_STATIC_CHANNEL &&
7347 !list_empty(&priv->ieee->network_free_list)) {
7348 element = priv->ieee->network_free_list.next;
7349 network = list_entry(element, struct ieee80211_network, list);
7350 ipw_adhoc_create(priv, network);
7351 rates = &priv->rates;
7352 list_del(element);
7353 list_add_tail(&network->list, &priv->ieee->network_list);
7355 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7357 /* If we reached the end of the list, then we don't have any valid
7358 * matching APs */
7359 if (!network) {
7360 ipw_debug_config(priv);
7362 if (!(priv->status & STATUS_SCANNING)) {
7363 if (!(priv->config & CFG_SPEED_SCAN))
7364 queue_delayed_work(priv->workqueue,
7365 &priv->request_scan,
7366 SCAN_INTERVAL);
7367 else
7368 queue_work(priv->workqueue,
7369 &priv->request_scan);
7372 return 0;
7375 ipw_associate_network(priv, network, rates, 0);
7377 return 1;
7380 static void ipw_bg_associate(void *data)
7382 struct ipw_priv *priv = data;
7383 mutex_lock(&priv->mutex);
7384 ipw_associate(data);
7385 mutex_unlock(&priv->mutex);
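/* Hardware decryption leaves the protected bit and the security
 * header/trailer in place; strip the CCMP header + MIC or the WEP
 * IV + ICV and clear the bit so the 802.11 stack sees a plain frame. */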
7388 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7389 struct sk_buff *skb)
7391 struct ieee80211_hdr *hdr;
7392 u16 fc;
7394 hdr = (struct ieee80211_hdr *)skb->data;
7395 fc = le16_to_cpu(hdr->frame_ctl);
7396 if (!(fc & IEEE80211_FCTL_PROTECTED))
7397 return;
7399 fc &= ~IEEE80211_FCTL_PROTECTED;
7400 hdr->frame_ctl = cpu_to_le16(fc);
7401 switch (priv->ieee->sec.level) {
7402 case SEC_LEVEL_3:
7403 /* Remove CCMP HDR */
7404 memmove(skb->data + IEEE80211_3ADDR_LEN,
7405 skb->data + IEEE80211_3ADDR_LEN + 8,
7406 skb->len - IEEE80211_3ADDR_LEN - 8);
7407 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7408 break;
7409 case SEC_LEVEL_2:
7410 break;
7411 case SEC_LEVEL_1:
7412 /* Remove IV */
7413 memmove(skb->data + IEEE80211_3ADDR_LEN,
7414 skb->data + IEEE80211_3ADDR_LEN + 4,
7415 skb->len - IEEE80211_3ADDR_LEN - 4);
7416 skb_trim(skb, skb->len - 8); /* IV + ICV */
7417 break;
7418 case SEC_LEVEL_0:
7419 break;
7420 default:
7421 printk(KERN_ERR "Unknown security level %d\n",
7422 priv->ieee->sec.level);
7423 break;
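/* Hand a received data frame to the 802.11 stack after trimming the
 * ipw header and, when hardware decryption was used, rebuilding the
 * frame as plaintext. */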
7427 static void ipw_handle_data_packet(struct ipw_priv *priv,
7428 struct ipw_rx_mem_buffer *rxb,
7429 struct ieee80211_rx_stats *stats)
7431 struct ieee80211_hdr_4addr *hdr;
7432 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7434 /* We received data from the HW, so stop the watchdog */
7435 priv->net_dev->trans_start = jiffies;
7437 /* We only process data packets if the
7438 * interface is open */
7439 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7440 skb_tailroom(rxb->skb))) {
7441 priv->ieee->stats.rx_errors++;
7442 priv->wstats.discard.misc++;
7443 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7444 return;
7445 } else if (unlikely(!netif_running(priv->net_dev))) {
7446 priv->ieee->stats.rx_dropped++;
7447 priv->wstats.discard.misc++;
7448 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7449 return;
7452 /* Advance skb->data to the start of the actual payload */
7453 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7455 /* Set the size of the skb to the size of the frame */
7456 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7458 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7460 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7461 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7462 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7463 (is_multicast_ether_addr(hdr->addr1) ?
7464 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7465 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7467 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7468 priv->ieee->stats.rx_errors++;
7469 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7470 rxb->skb = NULL;
7471 __ipw_led_activity_on(priv);
7475 #ifdef CONFIG_IEEE80211_RADIOTAP
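/* Monitor mode receive path: prepend a radiotap header describing
 * flags, rate, channel, signal and antenna before handing the frame to
 * the 802.11 stack. */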
7476 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7477 struct ipw_rx_mem_buffer *rxb,
7478 struct ieee80211_rx_stats *stats)
7480 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7481 struct ipw_rx_frame *frame = &pkt->u.frame;
7483 /* initial pull of some data */
7484 u16 received_channel = frame->received_channel;
7485 u8 antennaAndPhy = frame->antennaAndPhy;
7486 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7487 u16 pktrate = frame->rate;
7489 /* Magic struct that slots into the radiotap header -- no reason
7490 * to build this manually element by element, we can write it much
7491 * more efficiently than we can parse it. ORDER MATTERS HERE */
7492 struct ipw_rt_hdr {
7493 struct ieee80211_radiotap_header rt_hdr;
7494 u8 rt_flags; /* radiotap packet flags */
7495 u8 rt_rate; /* rate in 500kb/s */
7496 u16 rt_channel; /* channel in MHz */
7497 u16 rt_chbitmask; /* channel bitfield */
7498 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
7499 u8 rt_antenna; /* antenna number */
7500 } *ipw_rt;
7502 short len = le16_to_cpu(pkt->u.frame.length);
7504 /* We received data from the HW, so stop the watchdog */
7505 priv->net_dev->trans_start = jiffies;
7507 /* We only process data packets if the
7508 * interface is open */
7509 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7510 skb_tailroom(rxb->skb))) {
7511 priv->ieee->stats.rx_errors++;
7512 priv->wstats.discard.misc++;
7513 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7514 return;
7515 } else if (unlikely(!netif_running(priv->net_dev))) {
7516 priv->ieee->stats.rx_dropped++;
7517 priv->wstats.discard.misc++;
7518 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7519 return;
7522 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7523 * that now */
7524 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7525 /* FIXME: Should alloc bigger skb instead */
7526 priv->ieee->stats.rx_dropped++;
7527 priv->wstats.discard.misc++;
7528 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7529 return;
7532 /* copy the frame itself */
7533 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7534 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7536 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7537 * part of our real header, saves a little time.
7539 * No longer necessary since we fill in all our data. Purge before merging
7540 * patch officially.
7541 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7542 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7545 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7547 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7548 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7549 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7551 /* Big bitfield of all the fields we provide in radiotap */
7552 ipw_rt->rt_hdr.it_present =
7553 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7554 (1 << IEEE80211_RADIOTAP_RATE) |
7555 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7556 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7557 (1 << IEEE80211_RADIOTAP_ANTENNA));
7559 /* Zero the flags, we'll add to them as we go */
7560 ipw_rt->rt_flags = 0;
7562 /* Convert signal to DBM */
7563 ipw_rt->rt_dbmsignal = antsignal;
7565 /* Convert the channel data and set the flags */
7566 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7567 if (received_channel > 14) { /* 802.11a */
7568 ipw_rt->rt_chbitmask =
7569 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7570 } else if (antennaAndPhy & 32) { /* 802.11b */
7571 ipw_rt->rt_chbitmask =
7572 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7573 } else { /* 802.11g */
7574 ipw_rt->rt_chbitmask =
7575 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7578 /* set the rate in multiples of 500k/s */
7579 switch (pktrate) {
7580 case IPW_TX_RATE_1MB:
7581 ipw_rt->rt_rate = 2;
7582 break;
7583 case IPW_TX_RATE_2MB:
7584 ipw_rt->rt_rate = 4;
7585 break;
7586 case IPW_TX_RATE_5MB:
7587 ipw_rt->rt_rate = 10;
7588 break;
7589 case IPW_TX_RATE_6MB:
7590 ipw_rt->rt_rate = 12;
7591 break;
7592 case IPW_TX_RATE_9MB:
7593 ipw_rt->rt_rate = 18;
7594 break;
7595 case IPW_TX_RATE_11MB:
7596 ipw_rt->rt_rate = 22;
7597 break;
7598 case IPW_TX_RATE_12MB:
7599 ipw_rt->rt_rate = 24;
7600 break;
7601 case IPW_TX_RATE_18MB:
7602 ipw_rt->rt_rate = 36;
7603 break;
7604 case IPW_TX_RATE_24MB:
7605 ipw_rt->rt_rate = 48;
7606 break;
7607 case IPW_TX_RATE_36MB:
7608 ipw_rt->rt_rate = 72;
7609 break;
7610 case IPW_TX_RATE_48MB:
7611 ipw_rt->rt_rate = 96;
7612 break;
7613 case IPW_TX_RATE_54MB:
7614 ipw_rt->rt_rate = 108;
7615 break;
7616 default:
7617 ipw_rt->rt_rate = 0;
7618 break;
7621 /* antenna number */
7622 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7624 /* set the preamble flag if we have it */
7625 if ((antennaAndPhy & 64))
7626 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7628 /* Set the size of the skb to the size of the frame */
7629 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7631 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7633 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7634 priv->ieee->stats.rx_errors++;
7635 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7636 rxb->skb = NULL;
7637 /* no LED during capture */
7640 #endif
7642 static int is_network_packet(struct ipw_priv *priv,
7643 struct ieee80211_hdr_4addr *header)
7645 /* Filter incoming packets to determine if they are targeted toward
7646 * this network, discarding packets coming from ourselves */
7647 switch (priv->ieee->iw_mode) {
7648 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7649 /* packets from our adapter are dropped (echo) */
7650 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7651 return 0;
7653 /* {broad,multi}cast packets to our BSSID go through */
7654 if (is_multicast_ether_addr(header->addr1))
7655 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7657 /* packets to our adapter go through */
7658 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7659 ETH_ALEN);
7661 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7662 /* packets from our adapter are dropped (echo) */
7663 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7664 return 0;
7666 /* {broad,multi}cast packets to our BSS go through */
7667 if (is_multicast_ether_addr(header->addr1))
7668 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7670 /* packets to our adapter go through */
7671 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7672 ETH_ALEN);
7675 return 1;
7678 #define IPW_PACKET_RETRY_TIME HZ
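/* Detect retransmitted frames by comparing sequence and fragment
 * numbers against the last frame seen within the retry window
 * (a per-peer hash table in IBSS mode, a single slot in infrastructure
 * mode). */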
7680 static int is_duplicate_packet(struct ipw_priv *priv,
7681 struct ieee80211_hdr_4addr *header)
7683 u16 sc = le16_to_cpu(header->seq_ctl);
7684 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7685 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7686 u16 *last_seq, *last_frag;
7687 unsigned long *last_time;
7689 switch (priv->ieee->iw_mode) {
7690 case IW_MODE_ADHOC:
7692 struct list_head *p;
7693 struct ipw_ibss_seq *entry = NULL;
7694 u8 *mac = header->addr2;
7695 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7697 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7698 entry =
7699 list_entry(p, struct ipw_ibss_seq, list);
7700 if (!memcmp(entry->mac, mac, ETH_ALEN))
7701 break;
7703 if (p == &priv->ibss_mac_hash[index]) {
7704 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7705 if (!entry) {
7706 IPW_ERROR
7707 ("Cannot malloc new mac entry\n");
7708 return 0;
7710 memcpy(entry->mac, mac, ETH_ALEN);
7711 entry->seq_num = seq;
7712 entry->frag_num = frag;
7713 entry->packet_time = jiffies;
7714 list_add(&entry->list,
7715 &priv->ibss_mac_hash[index]);
7716 return 0;
7718 last_seq = &entry->seq_num;
7719 last_frag = &entry->frag_num;
7720 last_time = &entry->packet_time;
7721 break;
7723 case IW_MODE_INFRA:
7724 last_seq = &priv->last_seq_num;
7725 last_frag = &priv->last_frag_num;
7726 last_time = &priv->last_packet_time;
7727 break;
7728 default:
7729 return 0;
7731 if ((*last_seq == seq) &&
7732 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7733 if (*last_frag == frag)
7734 goto drop;
7735 if (*last_frag + 1 != frag)
7736 /* out-of-order fragment */
7737 goto drop;
7738 } else
7739 *last_seq = seq;
7741 *last_frag = frag;
7742 *last_time = jiffies;
7743 return 0;
7745 drop:
7746 /* This check is commented out because we have observed the card
7747 * receiving duplicate packets without the FCTL_RETRY bit set, in
7748 * IBSS mode with fragmentation enabled.
7749 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7750 return 1;
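/* Pass management frames to ieee80211_rx_mgt(), track IBSS peers seen
 * in beacons/probe responses on our BSSID, and optionally forward the
 * raw frame plus rx stats to user space when CFG_NET_STATS is set. */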
7753 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7754 struct ipw_rx_mem_buffer *rxb,
7755 struct ieee80211_rx_stats *stats)
7757 struct sk_buff *skb = rxb->skb;
7758 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7759 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7760 (skb->data + IPW_RX_FRAME_SIZE);
7762 ieee80211_rx_mgt(priv->ieee, header, stats);
7764 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7765 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7766 IEEE80211_STYPE_PROBE_RESP) ||
7767 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7768 IEEE80211_STYPE_BEACON))) {
7769 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7770 ipw_add_station(priv, header->addr2);
7773 if (priv->config & CFG_NET_STATS) {
7774 IPW_DEBUG_HC("sending stat packet\n");
7776 /* Set the size of the skb to the size of the full
7777 * ipw header and 802.11 frame */
7778 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7779 IPW_RX_FRAME_SIZE);
7781 /* Advance past the ipw packet header to the 802.11 frame */
7782 skb_pull(skb, IPW_RX_FRAME_SIZE);
7784 /* Push the ieee80211_rx_stats before the 802.11 frame */
7785 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7787 skb->dev = priv->ieee->dev;
7789 /* Point raw at the ieee80211_stats */
7790 skb->mac.raw = skb->data;
7792 skb->pkt_type = PACKET_OTHERHOST;
7793 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7794 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7795 netif_rx(skb);
7796 rxb->skb = NULL;
7801 * Main entry function for receiving a packet with 802.11 headers. This
7802 * should be called whenever the FW has notified us that there is a new
7803 * skb in the receive queue.
7805 static void ipw_rx(struct ipw_priv *priv)
7807 struct ipw_rx_mem_buffer *rxb;
7808 struct ipw_rx_packet *pkt;
7809 struct ieee80211_hdr_4addr *header;
7810 u32 r, w, i;
7811 u8 network_packet;
7813 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7814 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7815 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
7817 while (i != r) {
7818 rxb = priv->rxq->queue[i];
7819 #ifdef CONFIG_IPW2200_DEBUG
7820 if (unlikely(rxb == NULL)) {
7821 printk(KERN_CRIT "Queue not allocated!\n");
7822 break;
7824 #endif
7825 priv->rxq->queue[i] = NULL;
7827 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7828 IPW_RX_BUF_SIZE,
7829 PCI_DMA_FROMDEVICE);
7831 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7832 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7833 pkt->header.message_type,
7834 pkt->header.rx_seq_num, pkt->header.control_bits);
7836 switch (pkt->header.message_type) {
7837 case RX_FRAME_TYPE: /* 802.11 frame */ {
7838 struct ieee80211_rx_stats stats = {
7839 .rssi =
7840 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7841 IPW_RSSI_TO_DBM,
7842 .signal =
7843 le16_to_cpu(pkt->u.frame.signal),
7844 .noise =
7845 le16_to_cpu(pkt->u.frame.noise),
7846 .rate = pkt->u.frame.rate,
7847 .mac_time = jiffies,
7848 .received_channel =
7849 pkt->u.frame.received_channel,
7850 .freq =
7851 (pkt->u.frame.
7852 control & (1 << 0)) ?
7853 IEEE80211_24GHZ_BAND :
7854 IEEE80211_52GHZ_BAND,
7855 .len = le16_to_cpu(pkt->u.frame.length),
7858 if (stats.rssi != 0)
7859 stats.mask |= IEEE80211_STATMASK_RSSI;
7860 if (stats.signal != 0)
7861 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7862 if (stats.noise != 0)
7863 stats.mask |= IEEE80211_STATMASK_NOISE;
7864 if (stats.rate != 0)
7865 stats.mask |= IEEE80211_STATMASK_RATE;
7867 priv->rx_packets++;
7869 #ifdef CONFIG_IPW2200_MONITOR
7870 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7871 #ifdef CONFIG_IEEE80211_RADIOTAP
7872 ipw_handle_data_packet_monitor(priv,
7873 rxb,
7874 &stats);
7875 #else
7876 ipw_handle_data_packet(priv, rxb,
7877 &stats);
7878 #endif
7879 break;
7881 #endif
7883 header =
7884 (struct ieee80211_hdr_4addr *)(rxb->skb->
7885 data +
7886 IPW_RX_FRAME_SIZE);
7887 /* TODO: Check Ad-Hoc dest/source and make sure
7888 * that we are actually parsing these packets
7889 * correctly -- we should probably use the
7890 * frame control of the packet and disregard
7891 * the current iw_mode */
7893 network_packet =
7894 is_network_packet(priv, header);
7895 if (network_packet && priv->assoc_network) {
7896 priv->assoc_network->stats.rssi =
7897 stats.rssi;
7898 average_add(&priv->average_rssi,
7899 stats.rssi);
7900 priv->last_rx_rssi = stats.rssi;
7903 IPW_DEBUG_RX("Frame: len=%u\n",
7904 le16_to_cpu(pkt->u.frame.length));
7906 if (le16_to_cpu(pkt->u.frame.length) <
7907 frame_hdr_len(header)) {
7908 IPW_DEBUG_DROP
7909 ("Received packet is too small. "
7910 "Dropping.\n");
7911 priv->ieee->stats.rx_errors++;
7912 priv->wstats.discard.misc++;
7913 break;
7916 switch (WLAN_FC_GET_TYPE
7917 (le16_to_cpu(header->frame_ctl))) {
7919 case IEEE80211_FTYPE_MGMT:
7920 ipw_handle_mgmt_packet(priv, rxb,
7921 &stats);
7922 break;
7924 case IEEE80211_FTYPE_CTL:
7925 break;
7927 case IEEE80211_FTYPE_DATA:
7928 if (unlikely(!network_packet ||
7929 is_duplicate_packet(priv,
7930 header)))
7932 IPW_DEBUG_DROP("Dropping: "
7933 MAC_FMT ", "
7934 MAC_FMT ", "
7935 MAC_FMT "\n",
7936 MAC_ARG(header->
7937 addr1),
7938 MAC_ARG(header->
7939 addr2),
7940 MAC_ARG(header->
7941 addr3));
7942 break;
7945 ipw_handle_data_packet(priv, rxb,
7946 &stats);
7948 break;
7950 break;
7953 case RX_HOST_NOTIFICATION_TYPE:{
7954 IPW_DEBUG_RX
7955 ("Notification: subtype=%02X flags=%02X size=%d\n",
7956 pkt->u.notification.subtype,
7957 pkt->u.notification.flags,
7958 pkt->u.notification.size);
7959 ipw_rx_notification(priv, &pkt->u.notification);
7960 break;
7963 default:
7964 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
7965 pkt->header.message_type);
7966 break;
7969 /* For now we just don't re-use anything. We can tweak this
7970 * later to try and re-use notification packets and SKBs that
7971 * fail to Rx correctly */
7972 if (rxb->skb != NULL) {
7973 dev_kfree_skb_any(rxb->skb);
7974 rxb->skb = NULL;
7977 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
7978 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
7979 list_add_tail(&rxb->list, &priv->rxq->rx_used);
7981 i = (i + 1) % RX_QUEUE_SIZE;
7984 /* Backtrack one entry */
7985 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
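/* Worked example (added for clarity): the queue slots run 0..RX_QUEUE_SIZE-1.
 * If the loop above stopped at i == 0, the last slot actually handled was
 * RX_QUEUE_SIZE - 1, which is exactly what (i ? i : RX_QUEUE_SIZE) - 1
 * yields; for any other i it is simply i - 1. */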
7987 ipw_rx_queue_restock(priv);
7990 #define DEFAULT_RTS_THRESHOLD 2304U
7991 #define MIN_RTS_THRESHOLD 1U
7992 #define MAX_RTS_THRESHOLD 2304U
7993 #define DEFAULT_BEACON_INTERVAL 100U
7994 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7995 #define DEFAULT_LONG_RETRY_LIMIT 4U
7997 static int ipw_sw_reset(struct ipw_priv *priv, int init)
7999 int band, modulation;
8000 int old_mode = priv->ieee->iw_mode;
8002 /* Initialize module parameter values here */
8003 priv->config = 0;
8005 /* We default to disabling the LED code as right now it causes
8006 * too many systems to lock up... */
8007 if (!led)
8008 priv->config |= CFG_NO_LED;
8010 if (associate)
8011 priv->config |= CFG_ASSOCIATE;
8012 else
8013 IPW_DEBUG_INFO("Auto associate disabled.\n");
8015 if (auto_create)
8016 priv->config |= CFG_ADHOC_CREATE;
8017 else
8018 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8020 priv->config &= ~CFG_STATIC_ESSID;
8021 priv->essid_len = 0;
8022 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8024 if (disable) {
8025 priv->status |= STATUS_RF_KILL_SW;
8026 IPW_DEBUG_INFO("Radio disabled.\n");
8029 if (channel != 0) {
8030 priv->config |= CFG_STATIC_CHANNEL;
8031 priv->channel = channel;
8032 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8033 /* TODO: Validate that provided channel is in range */
8035 #ifdef CONFIG_IPW_QOS
8036 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8037 burst_duration_CCK, burst_duration_OFDM);
8038 #endif /* CONFIG_IPW_QOS */
8040 switch (mode) {
8041 case 1:
8042 priv->ieee->iw_mode = IW_MODE_ADHOC;
8043 priv->net_dev->type = ARPHRD_ETHER;
8045 break;
8046 #ifdef CONFIG_IPW2200_MONITOR
8047 case 2:
8048 priv->ieee->iw_mode = IW_MODE_MONITOR;
8049 #ifdef CONFIG_IEEE80211_RADIOTAP
8050 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8051 #else
8052 priv->net_dev->type = ARPHRD_IEEE80211;
8053 #endif
8054 break;
8055 #endif
8056 default:
8057 case 0:
8058 priv->net_dev->type = ARPHRD_ETHER;
8059 priv->ieee->iw_mode = IW_MODE_INFRA;
8060 break;
8063 if (hwcrypto) {
8064 priv->ieee->host_encrypt = 0;
8065 priv->ieee->host_encrypt_msdu = 0;
8066 priv->ieee->host_decrypt = 0;
8067 priv->ieee->host_mc_decrypt = 0;
8069 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8071         /* IPW2200/2915 is able to do hardware fragmentation. */
8072 priv->ieee->host_open_frag = 0;
8074 if ((priv->pci_dev->device == 0x4223) ||
8075 (priv->pci_dev->device == 0x4224)) {
8076 if (init)
8077 printk(KERN_INFO DRV_NAME
8078 ": Detected Intel PRO/Wireless 2915ABG Network "
8079 "Connection\n");
8080 priv->ieee->abg_true = 1;
8081 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8082 modulation = IEEE80211_OFDM_MODULATION |
8083 IEEE80211_CCK_MODULATION;
8084 priv->adapter = IPW_2915ABG;
8085 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8086 } else {
8087 if (init)
8088 printk(KERN_INFO DRV_NAME
8089 ": Detected Intel PRO/Wireless 2200BG Network "
8090 "Connection\n");
8092 priv->ieee->abg_true = 0;
8093 band = IEEE80211_24GHZ_BAND;
8094 modulation = IEEE80211_OFDM_MODULATION |
8095 IEEE80211_CCK_MODULATION;
8096 priv->adapter = IPW_2200BG;
8097 priv->ieee->mode = IEEE_G | IEEE_B;
8100 priv->ieee->freq_band = band;
8101 priv->ieee->modulation = modulation;
8103 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8105 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8106 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8108 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8109 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8110 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8112 /* If power management is turned on, default to AC mode */
8113 priv->power_mode = IPW_POWER_AC;
8114 priv->tx_power = IPW_TX_POWER_DEFAULT;
8116 return old_mode == priv->ieee->iw_mode;
8120 * This file defines the Wireless Extension handlers. It does not
8121 * define any methods of hardware manipulation and relies on the
8122 * functions defined in ipw_main to provide the HW interaction.
8124 * The exception to this is the use of the ipw_get_ordinal()
8125  * function used to poll the hardware rather than making unnecessary calls.
8129 static int ipw_wx_get_name(struct net_device *dev,
8130 struct iw_request_info *info,
8131 union iwreq_data *wrqu, char *extra)
8133 struct ipw_priv *priv = ieee80211_priv(dev);
8134 mutex_lock(&priv->mutex);
8135 if (priv->status & STATUS_RF_KILL_MASK)
8136 strcpy(wrqu->name, "radio off");
8137 else if (!(priv->status & STATUS_ASSOCIATED))
8138 strcpy(wrqu->name, "unassociated");
8139 else
8140 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8141 ipw_modes[priv->assoc_request.ieee_mode]);
8142 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8143 mutex_unlock(&priv->mutex);
8144 return 0;
8147 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8149 if (channel == 0) {
8150 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8151 priv->config &= ~CFG_STATIC_CHANNEL;
8152 IPW_DEBUG_ASSOC("Attempting to associate with new "
8153 "parameters.\n");
8154 ipw_associate(priv);
8155 return 0;
8158 priv->config |= CFG_STATIC_CHANNEL;
8160 if (priv->channel == channel) {
8161 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8162 channel);
8163 return 0;
8166 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8167 priv->channel = channel;
8169 #ifdef CONFIG_IPW2200_MONITOR
8170 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8171 int i;
8172 if (priv->status & STATUS_SCANNING) {
8173 IPW_DEBUG_SCAN("Scan abort triggered due to "
8174 "channel change.\n");
8175 ipw_abort_scan(priv);
8178 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8179 udelay(10);
8181 if (priv->status & STATUS_SCANNING)
8182 IPW_DEBUG_SCAN("Still scanning...\n");
8183 else
8184 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8185 1000 - i);
8187 return 0;
8189 #endif /* CONFIG_IPW2200_MONITOR */
8191 /* Network configuration changed -- force [re]association */
8192 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8193 if (!ipw_disassociate(priv))
8194 ipw_associate(priv);
8196 return 0;
8199 static int ipw_wx_set_freq(struct net_device *dev,
8200 struct iw_request_info *info,
8201 union iwreq_data *wrqu, char *extra)
8203 struct ipw_priv *priv = ieee80211_priv(dev);
8204 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8205 struct iw_freq *fwrq = &wrqu->freq;
8206 int ret = 0, i;
8207 u8 channel, flags;
8208 int band;
8210 if (fwrq->m == 0) {
8211 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8212 mutex_lock(&priv->mutex);
8213 ret = ipw_set_channel(priv, 0);
8214 mutex_unlock(&priv->mutex);
8215 return ret;
8217 /* if setting by freq convert to channel */
8218 if (fwrq->e == 1) {
8219 channel = ipw_freq_to_channel(priv->ieee, fwrq->m);
8220 if (channel == 0)
8221 return -EINVAL;
8222 } else
8223 channel = fwrq->m;
8225 if (!(band = ipw_is_valid_channel(priv->ieee, channel)))
8226 return -EINVAL;
8228 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8229 i = ipw_channel_to_index(priv->ieee, channel);
8230 if (i == -1)
8231 return -EINVAL;
8233 flags = (band == IEEE80211_24GHZ_BAND) ?
8234 geo->bg[i].flags : geo->a[i].flags;
8235 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8236 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8237 return -EINVAL;
8241 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8242 mutex_lock(&priv->mutex);
8243 ret = ipw_set_channel(priv, channel);
8244 mutex_unlock(&priv->mutex);
8245 return ret;
8248 static int ipw_wx_get_freq(struct net_device *dev,
8249 struct iw_request_info *info,
8250 union iwreq_data *wrqu, char *extra)
8252 struct ipw_priv *priv = ieee80211_priv(dev);
8254 wrqu->freq.e = 0;
8256 /* If we are associated, trying to associate, or have a statically
8257 * configured CHANNEL then return that; otherwise return ANY */
8258 mutex_lock(&priv->mutex);
8259 if (priv->config & CFG_STATIC_CHANNEL ||
8260 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8261 wrqu->freq.m = priv->channel;
8262 else
8263 wrqu->freq.m = 0;
8265 mutex_unlock(&priv->mutex);
8266 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8267 return 0;
8270 static int ipw_wx_set_mode(struct net_device *dev,
8271 struct iw_request_info *info,
8272 union iwreq_data *wrqu, char *extra)
8274 struct ipw_priv *priv = ieee80211_priv(dev);
8275 int err = 0;
8277 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8279 switch (wrqu->mode) {
8280 #ifdef CONFIG_IPW2200_MONITOR
8281 case IW_MODE_MONITOR:
8282 #endif
8283 case IW_MODE_ADHOC:
8284 case IW_MODE_INFRA:
8285 break;
8286 case IW_MODE_AUTO:
8287 wrqu->mode = IW_MODE_INFRA;
8288 break;
8289 default:
8290 return -EINVAL;
8292 if (wrqu->mode == priv->ieee->iw_mode)
8293 return 0;
8295 mutex_lock(&priv->mutex);
8297 ipw_sw_reset(priv, 0);
8299 #ifdef CONFIG_IPW2200_MONITOR
8300 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8301 priv->net_dev->type = ARPHRD_ETHER;
8303 if (wrqu->mode == IW_MODE_MONITOR)
8304 #ifdef CONFIG_IEEE80211_RADIOTAP
8305 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8306 #else
8307 priv->net_dev->type = ARPHRD_IEEE80211;
8308 #endif
8309 #endif /* CONFIG_IPW2200_MONITOR */
8311 /* Free the existing firmware and reset the fw_loaded
8312          * flag so ipw_load() will bring in the new firmware */
8313 free_firmware();
8315 priv->ieee->iw_mode = wrqu->mode;
8317 queue_work(priv->workqueue, &priv->adapter_restart);
8318 mutex_unlock(&priv->mutex);
8319 return err;
8322 static int ipw_wx_get_mode(struct net_device *dev,
8323 struct iw_request_info *info,
8324 union iwreq_data *wrqu, char *extra)
8326 struct ipw_priv *priv = ieee80211_priv(dev);
8327 mutex_lock(&priv->mutex);
8328 wrqu->mode = priv->ieee->iw_mode;
8329 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8330 mutex_unlock(&priv->mutex);
8331 return 0;
8334 /* Values are in microseconds */
8335 static const s32 timeout_duration[] = {
8336 350000,
8337 250000,
8338 75000,
8339 37000,
8340 25000,
8343 static const s32 period_duration[] = {
8344 400000,
8345 700000,
8346 1000000,
8347 1000000,
8348 1000000
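/* Note (added): the two tables above are indexed by power save level - 1
 * (levels 1..5) and are used by ipw_wx_get_powermode() below to pretty-print
 * the timeout/period pair, e.g. level 3 -> 75000 us timeout and 1000000 us
 * period, reported as "Timeout 75ms, Period 1000ms". */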
8351 static int ipw_wx_get_range(struct net_device *dev,
8352 struct iw_request_info *info,
8353 union iwreq_data *wrqu, char *extra)
8355 struct ipw_priv *priv = ieee80211_priv(dev);
8356 struct iw_range *range = (struct iw_range *)extra;
8357 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8358 int i = 0, j;
8360 wrqu->data.length = sizeof(*range);
8361 memset(range, 0, sizeof(*range));
8363         /* 54 Mb/s == ~27 Mb/s of real throughput (802.11g) */
8364 range->throughput = 27 * 1000 * 1000;
8366 range->max_qual.qual = 100;
8367 /* TODO: Find real max RSSI and stick here */
8368 range->max_qual.level = 0;
8369 range->max_qual.noise = priv->ieee->worst_rssi + 0x100;
8370 range->max_qual.updated = 7; /* Updated all three */
8372 range->avg_qual.qual = 70;
8373         /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8374 range->avg_qual.level = 0; /* FIXME to real average level */
8375 range->avg_qual.noise = 0;
8376 range->avg_qual.updated = 7; /* Updated all three */
8377 mutex_lock(&priv->mutex);
8378 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8380 for (i = 0; i < range->num_bitrates; i++)
8381 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8382 500000;
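/* Example (added): supported_rates[] holds 802.11 rate octets in units of
 * 500 kb/s, with the high bit flagging a basic rate.  A value of 0x96 thus
 * gives (0x96 & 0x7F) = 22 -> 22 * 500000 = 11,000,000 b/s, i.e. an
 * 11 Mb/s basic rate. */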
8384 range->max_rts = DEFAULT_RTS_THRESHOLD;
8385 range->min_frag = MIN_FRAG_THRESHOLD;
8386 range->max_frag = MAX_FRAG_THRESHOLD;
8388 range->encoding_size[0] = 5;
8389 range->encoding_size[1] = 13;
8390 range->num_encoding_sizes = 2;
8391 range->max_encoding_tokens = WEP_KEYS;
8393 /* Set the Wireless Extension versions */
8394 range->we_version_compiled = WIRELESS_EXT;
8395 range->we_version_source = 18;
8397 i = 0;
8398 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8399 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES;
8400 i++, j++) {
8401 range->freq[i].i = geo->bg[j].channel;
8402 range->freq[i].m = geo->bg[j].freq * 100000;
8403 range->freq[i].e = 1;
8407 if (priv->ieee->mode & IEEE_A) {
8408 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES;
8409 i++, j++) {
8410 range->freq[i].i = geo->a[j].channel;
8411 range->freq[i].m = geo->a[j].freq * 100000;
8412 range->freq[i].e = 1;
8416 range->num_channels = i;
8417 range->num_frequency = i;
8419 mutex_unlock(&priv->mutex);
8421 /* Event capability (kernel + driver) */
8422 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8423 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8424 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8425 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8427 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8428 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8430 IPW_DEBUG_WX("GET Range\n");
8431 return 0;
8434 static int ipw_wx_set_wap(struct net_device *dev,
8435 struct iw_request_info *info,
8436 union iwreq_data *wrqu, char *extra)
8438 struct ipw_priv *priv = ieee80211_priv(dev);
8440 static const unsigned char any[] = {
8441 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8443 static const unsigned char off[] = {
8444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8447 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8448 return -EINVAL;
8449 mutex_lock(&priv->mutex);
8450 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8451 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8452 /* we disable mandatory BSSID association */
8453 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8454 priv->config &= ~CFG_STATIC_BSSID;
8455 IPW_DEBUG_ASSOC("Attempting to associate with new "
8456 "parameters.\n");
8457 ipw_associate(priv);
8458 mutex_unlock(&priv->mutex);
8459 return 0;
8462 priv->config |= CFG_STATIC_BSSID;
8463 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8464 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8465 mutex_unlock(&priv->mutex);
8466 return 0;
8469 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8470 MAC_ARG(wrqu->ap_addr.sa_data));
8472 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8474 /* Network configuration changed -- force [re]association */
8475 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8476 if (!ipw_disassociate(priv))
8477 ipw_associate(priv);
8479 mutex_unlock(&priv->mutex);
8480 return 0;
8483 static int ipw_wx_get_wap(struct net_device *dev,
8484 struct iw_request_info *info,
8485 union iwreq_data *wrqu, char *extra)
8487 struct ipw_priv *priv = ieee80211_priv(dev);
8488 /* If we are associated, trying to associate, or have a statically
8489 * configured BSSID then return that; otherwise return ANY */
8490 mutex_lock(&priv->mutex);
8491 if (priv->config & CFG_STATIC_BSSID ||
8492 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8493 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8494 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8495 } else
8496 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8498 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8499 MAC_ARG(wrqu->ap_addr.sa_data));
8500 mutex_unlock(&priv->mutex);
8501 return 0;
8504 static int ipw_wx_set_essid(struct net_device *dev,
8505 struct iw_request_info *info,
8506 union iwreq_data *wrqu, char *extra)
8508 struct ipw_priv *priv = ieee80211_priv(dev);
8509 char *essid = ""; /* ANY */
8510 int length = 0;
8511 mutex_lock(&priv->mutex);
8512 if (wrqu->essid.flags && wrqu->essid.length) {
8513 length = wrqu->essid.length - 1;
8514 essid = extra;
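/* Note (added, assumption): with the Wireless Extensions revision this
 * driver targets, essid.length counts the trailing '\0' as well, hence the
 * "- 1" above to recover the real ESSID length. */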
8516 if (length == 0) {
8517 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8518 if ((priv->config & CFG_STATIC_ESSID) &&
8519 !(priv->status & (STATUS_ASSOCIATED |
8520 STATUS_ASSOCIATING))) {
8521 IPW_DEBUG_ASSOC("Attempting to associate with new "
8522 "parameters.\n");
8523 priv->config &= ~CFG_STATIC_ESSID;
8524 ipw_associate(priv);
8526 mutex_unlock(&priv->mutex);
8527 return 0;
8530 length = min(length, IW_ESSID_MAX_SIZE);
8532 priv->config |= CFG_STATIC_ESSID;
8534 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8535 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8536 mutex_unlock(&priv->mutex);
8537 return 0;
8540 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8541 length);
8543 priv->essid_len = length;
8544 memcpy(priv->essid, essid, priv->essid_len);
8546 /* Network configuration changed -- force [re]association */
8547 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8548 if (!ipw_disassociate(priv))
8549 ipw_associate(priv);
8551 mutex_unlock(&priv->mutex);
8552 return 0;
8555 static int ipw_wx_get_essid(struct net_device *dev,
8556 struct iw_request_info *info,
8557 union iwreq_data *wrqu, char *extra)
8559 struct ipw_priv *priv = ieee80211_priv(dev);
8561 /* If we are associated, trying to associate, or have a statically
8562 * configured ESSID then return that; otherwise return ANY */
8563 mutex_lock(&priv->mutex);
8564 if (priv->config & CFG_STATIC_ESSID ||
8565 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8566 IPW_DEBUG_WX("Getting essid: '%s'\n",
8567 escape_essid(priv->essid, priv->essid_len));
8568 memcpy(extra, priv->essid, priv->essid_len);
8569 wrqu->essid.length = priv->essid_len;
8570 wrqu->essid.flags = 1; /* active */
8571 } else {
8572 IPW_DEBUG_WX("Getting essid: ANY\n");
8573 wrqu->essid.length = 0;
8574                 wrqu->essid.flags = 0;  /* not set */
8576 mutex_unlock(&priv->mutex);
8577 return 0;
8580 static int ipw_wx_set_nick(struct net_device *dev,
8581 struct iw_request_info *info,
8582 union iwreq_data *wrqu, char *extra)
8584 struct ipw_priv *priv = ieee80211_priv(dev);
8586 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8587 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8588 return -E2BIG;
8589 mutex_lock(&priv->mutex);
8590 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8591 memset(priv->nick, 0, sizeof(priv->nick));
8592 memcpy(priv->nick, extra, wrqu->data.length);
8593 IPW_DEBUG_TRACE("<<\n");
8594 mutex_unlock(&priv->mutex);
8595 return 0;
8599 static int ipw_wx_get_nick(struct net_device *dev,
8600 struct iw_request_info *info,
8601 union iwreq_data *wrqu, char *extra)
8603 struct ipw_priv *priv = ieee80211_priv(dev);
8604 IPW_DEBUG_WX("Getting nick\n");
8605 mutex_lock(&priv->mutex);
8606 wrqu->data.length = strlen(priv->nick) + 1;
8607 memcpy(extra, priv->nick, wrqu->data.length);
8608 wrqu->data.flags = 1; /* active */
8609 mutex_unlock(&priv->mutex);
8610 return 0;
8613 static int ipw_wx_set_rate(struct net_device *dev,
8614 struct iw_request_info *info,
8615 union iwreq_data *wrqu, char *extra)
8617 /* TODO: We should use semaphores or locks for access to priv */
8618 struct ipw_priv *priv = ieee80211_priv(dev);
8619 u32 target_rate = wrqu->bitrate.value;
8620 u32 fixed, mask;
8622 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8623 /* value = X, fixed = 1 means only rate X */
8624         /* value = X, fixed = 0 means all rates at or below X */
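/* Worked example (added): a request for 11 Mb/s (target_rate == 11000000)
 * with fixed == 0 walks the ladder below and ORs in the 1, 2 and 5.5 Mb/s
 * CCK masks plus the 6 and 9 Mb/s OFDM masks before reaching the 11 Mb/s
 * case, so every rate up to 11 Mb/s is allowed; with fixed == 1 only
 * IEEE80211_CCK_RATE_11MB_MASK ends up set. */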
8626 if (target_rate == -1) {
8627 fixed = 0;
8628 mask = IEEE80211_DEFAULT_RATES_MASK;
8629 /* Now we should reassociate */
8630 goto apply;
8633 mask = 0;
8634 fixed = wrqu->bitrate.fixed;
8636 if (target_rate == 1000000 || !fixed)
8637 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8638 if (target_rate == 1000000)
8639 goto apply;
8641 if (target_rate == 2000000 || !fixed)
8642 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8643 if (target_rate == 2000000)
8644 goto apply;
8646 if (target_rate == 5500000 || !fixed)
8647 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8648 if (target_rate == 5500000)
8649 goto apply;
8651 if (target_rate == 6000000 || !fixed)
8652 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8653 if (target_rate == 6000000)
8654 goto apply;
8656 if (target_rate == 9000000 || !fixed)
8657 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8658 if (target_rate == 9000000)
8659 goto apply;
8661 if (target_rate == 11000000 || !fixed)
8662 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8663 if (target_rate == 11000000)
8664 goto apply;
8666 if (target_rate == 12000000 || !fixed)
8667 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8668 if (target_rate == 12000000)
8669 goto apply;
8671 if (target_rate == 18000000 || !fixed)
8672 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8673 if (target_rate == 18000000)
8674 goto apply;
8676 if (target_rate == 24000000 || !fixed)
8677 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8678 if (target_rate == 24000000)
8679 goto apply;
8681 if (target_rate == 36000000 || !fixed)
8682 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8683 if (target_rate == 36000000)
8684 goto apply;
8686 if (target_rate == 48000000 || !fixed)
8687 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8688 if (target_rate == 48000000)
8689 goto apply;
8691 if (target_rate == 54000000 || !fixed)
8692 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8693 if (target_rate == 54000000)
8694 goto apply;
8696 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8697 return -EINVAL;
8699 apply:
8700 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8701 mask, fixed ? "fixed" : "sub-rates");
8702 mutex_lock(&priv->mutex);
8703 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8704 priv->config &= ~CFG_FIXED_RATE;
8705 ipw_set_fixed_rate(priv, priv->ieee->mode);
8706 } else
8707 priv->config |= CFG_FIXED_RATE;
8709 if (priv->rates_mask == mask) {
8710 IPW_DEBUG_WX("Mask set to current mask.\n");
8711 mutex_unlock(&priv->mutex);
8712 return 0;
8715 priv->rates_mask = mask;
8717 /* Network configuration changed -- force [re]association */
8718 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8719 if (!ipw_disassociate(priv))
8720 ipw_associate(priv);
8722 mutex_unlock(&priv->mutex);
8723 return 0;
8726 static int ipw_wx_get_rate(struct net_device *dev,
8727 struct iw_request_info *info,
8728 union iwreq_data *wrqu, char *extra)
8730 struct ipw_priv *priv = ieee80211_priv(dev);
8731 mutex_lock(&priv->mutex);
8732 wrqu->bitrate.value = priv->last_rate;
8733 mutex_unlock(&priv->mutex);
8734 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8735 return 0;
8738 static int ipw_wx_set_rts(struct net_device *dev,
8739 struct iw_request_info *info,
8740 union iwreq_data *wrqu, char *extra)
8742 struct ipw_priv *priv = ieee80211_priv(dev);
8743 mutex_lock(&priv->mutex);
8744 if (wrqu->rts.disabled)
8745 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8746 else {
8747 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8748 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8749 mutex_unlock(&priv->mutex);
8750 return -EINVAL;
8752 priv->rts_threshold = wrqu->rts.value;
8755 ipw_send_rts_threshold(priv, priv->rts_threshold);
8756 mutex_unlock(&priv->mutex);
8757 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8758 return 0;
8761 static int ipw_wx_get_rts(struct net_device *dev,
8762 struct iw_request_info *info,
8763 union iwreq_data *wrqu, char *extra)
8765 struct ipw_priv *priv = ieee80211_priv(dev);
8766 mutex_lock(&priv->mutex);
8767 wrqu->rts.value = priv->rts_threshold;
8768 wrqu->rts.fixed = 0; /* no auto select */
8769 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8770 mutex_unlock(&priv->mutex);
8771 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8772 return 0;
8775 static int ipw_wx_set_txpow(struct net_device *dev,
8776 struct iw_request_info *info,
8777 union iwreq_data *wrqu, char *extra)
8779 struct ipw_priv *priv = ieee80211_priv(dev);
8780 int err = 0;
8782 mutex_lock(&priv->mutex);
8783 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8784 err = -EINPROGRESS;
8785 goto out;
8788 if (!wrqu->power.fixed)
8789 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8791 if (wrqu->power.flags != IW_TXPOW_DBM) {
8792 err = -EINVAL;
8793 goto out;
8796 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8797 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8798 err = -EINVAL;
8799 goto out;
8802 priv->tx_power = wrqu->power.value;
8803 err = ipw_set_tx_power(priv);
8804 out:
8805 mutex_unlock(&priv->mutex);
8806 return err;
8809 static int ipw_wx_get_txpow(struct net_device *dev,
8810 struct iw_request_info *info,
8811 union iwreq_data *wrqu, char *extra)
8813 struct ipw_priv *priv = ieee80211_priv(dev);
8814 mutex_lock(&priv->mutex);
8815 wrqu->power.value = priv->tx_power;
8816 wrqu->power.fixed = 1;
8817 wrqu->power.flags = IW_TXPOW_DBM;
8818 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8819 mutex_unlock(&priv->mutex);
8821 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8822 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8824 return 0;
8827 static int ipw_wx_set_frag(struct net_device *dev,
8828 struct iw_request_info *info,
8829 union iwreq_data *wrqu, char *extra)
8831 struct ipw_priv *priv = ieee80211_priv(dev);
8832 mutex_lock(&priv->mutex);
8833 if (wrqu->frag.disabled)
8834 priv->ieee->fts = DEFAULT_FTS;
8835 else {
8836 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8837 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8838 mutex_unlock(&priv->mutex);
8839 return -EINVAL;
8842 priv->ieee->fts = wrqu->frag.value & ~0x1;
8845 ipw_send_frag_threshold(priv, wrqu->frag.value);
8846 mutex_unlock(&priv->mutex);
8847 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8848 return 0;
8851 static int ipw_wx_get_frag(struct net_device *dev,
8852 struct iw_request_info *info,
8853 union iwreq_data *wrqu, char *extra)
8855 struct ipw_priv *priv = ieee80211_priv(dev);
8856 mutex_lock(&priv->mutex);
8857 wrqu->frag.value = priv->ieee->fts;
8858 wrqu->frag.fixed = 0; /* no auto select */
8859 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8860 mutex_unlock(&priv->mutex);
8861 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8863 return 0;
8866 static int ipw_wx_set_retry(struct net_device *dev,
8867 struct iw_request_info *info,
8868 union iwreq_data *wrqu, char *extra)
8870 struct ipw_priv *priv = ieee80211_priv(dev);
8872 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8873 return -EINVAL;
8875 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8876 return 0;
8878 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8879 return -EINVAL;
8881 mutex_lock(&priv->mutex);
8882 if (wrqu->retry.flags & IW_RETRY_MIN)
8883 priv->short_retry_limit = (u8) wrqu->retry.value;
8884 else if (wrqu->retry.flags & IW_RETRY_MAX)
8885 priv->long_retry_limit = (u8) wrqu->retry.value;
8886 else {
8887 priv->short_retry_limit = (u8) wrqu->retry.value;
8888 priv->long_retry_limit = (u8) wrqu->retry.value;
8891 ipw_send_retry_limit(priv, priv->short_retry_limit,
8892 priv->long_retry_limit);
8893 mutex_unlock(&priv->mutex);
8894 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8895 priv->short_retry_limit, priv->long_retry_limit);
8896 return 0;
8899 static int ipw_wx_get_retry(struct net_device *dev,
8900 struct iw_request_info *info,
8901 union iwreq_data *wrqu, char *extra)
8903 struct ipw_priv *priv = ieee80211_priv(dev);
8905 mutex_lock(&priv->mutex);
8906 wrqu->retry.disabled = 0;
8908 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8909 mutex_unlock(&priv->mutex);
8910 return -EINVAL;
8913 if (wrqu->retry.flags & IW_RETRY_MAX) {
8914 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8915 wrqu->retry.value = priv->long_retry_limit;
8916 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8917 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8918 wrqu->retry.value = priv->short_retry_limit;
8919 } else {
8920 wrqu->retry.flags = IW_RETRY_LIMIT;
8921 wrqu->retry.value = priv->short_retry_limit;
8923 mutex_unlock(&priv->mutex);
8925 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8927 return 0;
8930 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8931 int essid_len)
8933 struct ipw_scan_request_ext scan;
8934 int err = 0, scan_type;
8936 if (!(priv->status & STATUS_INIT) ||
8937 (priv->status & STATUS_EXIT_PENDING))
8938 return 0;
8940 mutex_lock(&priv->mutex);
8942 if (priv->status & STATUS_RF_KILL_MASK) {
8943 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
8944 priv->status |= STATUS_SCAN_PENDING;
8945 goto done;
8948 IPW_DEBUG_HC("starting request direct scan!\n");
8950 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8951 /* We should not sleep here; otherwise we will block most
8952 * of the system (for instance, we hold rtnl_lock when we
8953 * get here).
8955 err = -EAGAIN;
8956 goto done;
8958 memset(&scan, 0, sizeof(scan));
8960 if (priv->config & CFG_SPEED_SCAN)
8961 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8962 cpu_to_le16(30);
8963 else
8964 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8965 cpu_to_le16(20);
8967 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
8968 cpu_to_le16(20);
8969 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
8970 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
8972 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
8974 err = ipw_send_ssid(priv, essid, essid_len);
8975 if (err) {
8976 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
8977 goto done;
8979 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
8981 ipw_add_scan_channels(priv, &scan, scan_type);
8983 err = ipw_send_scan_request_ext(priv, &scan);
8984 if (err) {
8985 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
8986 goto done;
8989 priv->status |= STATUS_SCANNING;
8991 done:
8992 mutex_unlock(&priv->mutex);
8993 return err;
8996 static int ipw_wx_set_scan(struct net_device *dev,
8997 struct iw_request_info *info,
8998 union iwreq_data *wrqu, char *extra)
9000 struct ipw_priv *priv = ieee80211_priv(dev);
9001 struct iw_scan_req *req = NULL;
9002 if (wrqu->data.length
9003 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9004 req = (struct iw_scan_req *)extra;
9005 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9006 ipw_request_direct_scan(priv, req->essid,
9007 req->essid_len);
9008 return 0;
9012 IPW_DEBUG_WX("Start scan\n");
9014 queue_work(priv->workqueue, &priv->request_scan);
9016 return 0;
9019 static int ipw_wx_get_scan(struct net_device *dev,
9020 struct iw_request_info *info,
9021 union iwreq_data *wrqu, char *extra)
9023 struct ipw_priv *priv = ieee80211_priv(dev);
9024 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9027 static int ipw_wx_set_encode(struct net_device *dev,
9028 struct iw_request_info *info,
9029 union iwreq_data *wrqu, char *key)
9031 struct ipw_priv *priv = ieee80211_priv(dev);
9032 int ret;
9033 u32 cap = priv->capability;
9035 mutex_lock(&priv->mutex);
9036 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9038 /* In IBSS mode, we need to notify the firmware to update
9039 * the beacon info after we changed the capability. */
9040 if (cap != priv->capability &&
9041 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9042 priv->status & STATUS_ASSOCIATED)
9043 ipw_disassociate(priv);
9045 mutex_unlock(&priv->mutex);
9046 return ret;
9049 static int ipw_wx_get_encode(struct net_device *dev,
9050 struct iw_request_info *info,
9051 union iwreq_data *wrqu, char *key)
9053 struct ipw_priv *priv = ieee80211_priv(dev);
9054 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9057 static int ipw_wx_set_power(struct net_device *dev,
9058 struct iw_request_info *info,
9059 union iwreq_data *wrqu, char *extra)
9061 struct ipw_priv *priv = ieee80211_priv(dev);
9062 int err;
9063 mutex_lock(&priv->mutex);
9064 if (wrqu->power.disabled) {
9065 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9066 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9067 if (err) {
9068 IPW_DEBUG_WX("failed setting power mode.\n");
9069 mutex_unlock(&priv->mutex);
9070 return err;
9072 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9073 mutex_unlock(&priv->mutex);
9074 return 0;
9077 switch (wrqu->power.flags & IW_POWER_MODE) {
9078 case IW_POWER_ON: /* If not specified */
9079 case IW_POWER_MODE: /* If set all mask */
9080         case IW_POWER_ALL_R:    /* If explicitly stated all */
9081 break;
9082 default: /* Otherwise we don't support it */
9083 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9084 wrqu->power.flags);
9085 mutex_unlock(&priv->mutex);
9086 return -EOPNOTSUPP;
9089 /* If the user hasn't specified a power management mode yet, default
9090 * to BATTERY */
9091 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9092 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9093 else
9094 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
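/* Note (added): priv->power_mode packs an enable flag together with a
 * level, e.g. a user-selected level 3 is stored as (IPW_POWER_ENABLED | 3),
 * and the raw level is recovered later via IPW_POWER_LEVEL(priv->power_mode). */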
9095 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9096 if (err) {
9097 IPW_DEBUG_WX("failed setting power mode.\n");
9098 mutex_unlock(&priv->mutex);
9099 return err;
9102 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9103 mutex_unlock(&priv->mutex);
9104 return 0;
9107 static int ipw_wx_get_power(struct net_device *dev,
9108 struct iw_request_info *info,
9109 union iwreq_data *wrqu, char *extra)
9111 struct ipw_priv *priv = ieee80211_priv(dev);
9112 mutex_lock(&priv->mutex);
9113 if (!(priv->power_mode & IPW_POWER_ENABLED))
9114 wrqu->power.disabled = 1;
9115 else
9116 wrqu->power.disabled = 0;
9118 mutex_unlock(&priv->mutex);
9119 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9121 return 0;
9124 static int ipw_wx_set_powermode(struct net_device *dev,
9125 struct iw_request_info *info,
9126 union iwreq_data *wrqu, char *extra)
9128 struct ipw_priv *priv = ieee80211_priv(dev);
9129 int mode = *(int *)extra;
9130 int err;
9131 mutex_lock(&priv->mutex);
9132 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9133 mode = IPW_POWER_AC;
9134 priv->power_mode = mode;
9135 } else {
9136 priv->power_mode = IPW_POWER_ENABLED | mode;
9139 if (priv->power_mode != mode) {
9140 err = ipw_send_power_mode(priv, mode);
9142 if (err) {
9143 IPW_DEBUG_WX("failed setting power mode.\n");
9144 mutex_unlock(&priv->mutex);
9145 return err;
9148 mutex_unlock(&priv->mutex);
9149 return 0;
9152 #define MAX_WX_STRING 80
9153 static int ipw_wx_get_powermode(struct net_device *dev,
9154 struct iw_request_info *info,
9155 union iwreq_data *wrqu, char *extra)
9157 struct ipw_priv *priv = ieee80211_priv(dev);
9158 int level = IPW_POWER_LEVEL(priv->power_mode);
9159 char *p = extra;
9161 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9163 switch (level) {
9164 case IPW_POWER_AC:
9165 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9166 break;
9167 case IPW_POWER_BATTERY:
9168 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9169 break;
9170 default:
9171 p += snprintf(p, MAX_WX_STRING - (p - extra),
9172 "(Timeout %dms, Period %dms)",
9173 timeout_duration[level - 1] / 1000,
9174 period_duration[level - 1] / 1000);
9177 if (!(priv->power_mode & IPW_POWER_ENABLED))
9178 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9180 wrqu->data.length = p - extra + 1;
9182 return 0;
9185 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9186 struct iw_request_info *info,
9187 union iwreq_data *wrqu, char *extra)
9189 struct ipw_priv *priv = ieee80211_priv(dev);
9190 int mode = *(int *)extra;
9191 u8 band = 0, modulation = 0;
9193 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9194 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9195 return -EINVAL;
9197 mutex_lock(&priv->mutex);
9198 if (priv->adapter == IPW_2915ABG) {
9199 priv->ieee->abg_true = 1;
9200 if (mode & IEEE_A) {
9201 band |= IEEE80211_52GHZ_BAND;
9202 modulation |= IEEE80211_OFDM_MODULATION;
9203 } else
9204 priv->ieee->abg_true = 0;
9205 } else {
9206 if (mode & IEEE_A) {
9207 IPW_WARNING("Attempt to set 2200BG into "
9208 "802.11a mode\n");
9209 mutex_unlock(&priv->mutex);
9210 return -EINVAL;
9213 priv->ieee->abg_true = 0;
9216 if (mode & IEEE_B) {
9217 band |= IEEE80211_24GHZ_BAND;
9218 modulation |= IEEE80211_CCK_MODULATION;
9219 } else
9220 priv->ieee->abg_true = 0;
9222 if (mode & IEEE_G) {
9223 band |= IEEE80211_24GHZ_BAND;
9224 modulation |= IEEE80211_OFDM_MODULATION;
9225 } else
9226 priv->ieee->abg_true = 0;
9228 priv->ieee->mode = mode;
9229 priv->ieee->freq_band = band;
9230 priv->ieee->modulation = modulation;
9231 init_supported_rates(priv, &priv->rates);
9233 /* Network configuration changed -- force [re]association */
9234 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9235 if (!ipw_disassociate(priv)) {
9236 ipw_send_supported_rates(priv, &priv->rates);
9237 ipw_associate(priv);
9240 /* Update the band LEDs */
9241 ipw_led_band_on(priv);
9243 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9244 mode & IEEE_A ? 'a' : '.',
9245 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9246 mutex_unlock(&priv->mutex);
9247 return 0;
9250 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9251 struct iw_request_info *info,
9252 union iwreq_data *wrqu, char *extra)
9254 struct ipw_priv *priv = ieee80211_priv(dev);
9255 mutex_lock(&priv->mutex);
9256 switch (priv->ieee->mode) {
9257 case IEEE_A:
9258 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9259 break;
9260 case IEEE_B:
9261 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9262 break;
9263 case IEEE_A | IEEE_B:
9264 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9265 break;
9266 case IEEE_G:
9267 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9268 break;
9269 case IEEE_A | IEEE_G:
9270 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9271 break;
9272 case IEEE_B | IEEE_G:
9273 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9274 break;
9275 case IEEE_A | IEEE_B | IEEE_G:
9276 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9277 break;
9278 default:
9279 strncpy(extra, "unknown", MAX_WX_STRING);
9280 break;
9283 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9285 wrqu->data.length = strlen(extra) + 1;
9286 mutex_unlock(&priv->mutex);
9288 return 0;
9291 static int ipw_wx_set_preamble(struct net_device *dev,
9292 struct iw_request_info *info,
9293 union iwreq_data *wrqu, char *extra)
9295 struct ipw_priv *priv = ieee80211_priv(dev);
9296 int mode = *(int *)extra;
9297 mutex_lock(&priv->mutex);
9298 /* Switching from SHORT -> LONG requires a disassociation */
9299 if (mode == 1) {
9300 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9301 priv->config |= CFG_PREAMBLE_LONG;
9303 /* Network configuration changed -- force [re]association */
9304 IPW_DEBUG_ASSOC
9305 ("[re]association triggered due to preamble change.\n");
9306 if (!ipw_disassociate(priv))
9307 ipw_associate(priv);
9309 goto done;
9312 if (mode == 0) {
9313 priv->config &= ~CFG_PREAMBLE_LONG;
9314 goto done;
9316 mutex_unlock(&priv->mutex);
9317 return -EINVAL;
9319 done:
9320 mutex_unlock(&priv->mutex);
9321 return 0;
9324 static int ipw_wx_get_preamble(struct net_device *dev,
9325 struct iw_request_info *info,
9326 union iwreq_data *wrqu, char *extra)
9328 struct ipw_priv *priv = ieee80211_priv(dev);
9329 mutex_lock(&priv->mutex);
9330 if (priv->config & CFG_PREAMBLE_LONG)
9331 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9332 else
9333 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9334 mutex_unlock(&priv->mutex);
9335 return 0;
9338 #ifdef CONFIG_IPW2200_MONITOR
9339 static int ipw_wx_set_monitor(struct net_device *dev,
9340 struct iw_request_info *info,
9341 union iwreq_data *wrqu, char *extra)
9343 struct ipw_priv *priv = ieee80211_priv(dev);
9344 int *parms = (int *)extra;
9345 int enable = (parms[0] > 0);
9346 mutex_lock(&priv->mutex);
9347 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9348 if (enable) {
9349 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9350 #ifdef CONFIG_IEEE80211_RADIOTAP
9351 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9352 #else
9353 priv->net_dev->type = ARPHRD_IEEE80211;
9354 #endif
9355 queue_work(priv->workqueue, &priv->adapter_restart);
9358 ipw_set_channel(priv, parms[1]);
9359 } else {
9360 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9361 mutex_unlock(&priv->mutex);
9362 return 0;
9364 priv->net_dev->type = ARPHRD_ETHER;
9365 queue_work(priv->workqueue, &priv->adapter_restart);
9367 mutex_unlock(&priv->mutex);
9368 return 0;
9371 #endif // CONFIG_IPW2200_MONITOR
9373 static int ipw_wx_reset(struct net_device *dev,
9374 struct iw_request_info *info,
9375 union iwreq_data *wrqu, char *extra)
9377 struct ipw_priv *priv = ieee80211_priv(dev);
9378 IPW_DEBUG_WX("RESET\n");
9379 queue_work(priv->workqueue, &priv->adapter_restart);
9380 return 0;
9383 static int ipw_wx_sw_reset(struct net_device *dev,
9384 struct iw_request_info *info,
9385 union iwreq_data *wrqu, char *extra)
9387 struct ipw_priv *priv = ieee80211_priv(dev);
9388 union iwreq_data wrqu_sec = {
9389 .encoding = {
9390 .flags = IW_ENCODE_DISABLED,
9393 int ret;
9395 IPW_DEBUG_WX("SW_RESET\n");
9397 mutex_lock(&priv->mutex);
9399 ret = ipw_sw_reset(priv, 0);
9400 if (!ret) {
9401 free_firmware();
9402 ipw_adapter_restart(priv);
9405 /* The SW reset bit might have been toggled on by the 'disable'
9406 * module parameter, so take appropriate action */
9407 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9409 mutex_unlock(&priv->mutex);
9410 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9411 mutex_lock(&priv->mutex);
9413 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9414 /* Configuration likely changed -- force [re]association */
9415 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9416 "reset.\n");
9417 if (!ipw_disassociate(priv))
9418 ipw_associate(priv);
9421 mutex_unlock(&priv->mutex);
9423 return 0;
9426 /* Rebase the WE IOCTLs to zero for the handler array */
9427 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
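/* Example (added): SIOCSIWCOMMIT is the first standard WE ioctl, so
 * IW_IOCTL(SIOCGIWNAME) expands to the designated initializer
 * [SIOCGIWNAME - SIOCSIWCOMMIT], placing ipw_wx_get_name in the slot the
 * wireless extensions core looks up when dispatching SIOCGIWNAME. */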
9428 static iw_handler ipw_wx_handlers[] = {
9429 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9430 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9431 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9432 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9433 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9434 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9435 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9436 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9437 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9438 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9439 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9440 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9441 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9442 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9443 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9444 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9445 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9446 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9447 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9448 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9449 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9450 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9451 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9452 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9453 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9454 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9455 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9456 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9457 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9458 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9459 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9460 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9461 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9462 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9463 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9464 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9465 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9466 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9467 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9470 enum {
9471 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9472 IPW_PRIV_GET_POWER,
9473 IPW_PRIV_SET_MODE,
9474 IPW_PRIV_GET_MODE,
9475 IPW_PRIV_SET_PREAMBLE,
9476 IPW_PRIV_GET_PREAMBLE,
9477 IPW_PRIV_RESET,
9478 IPW_PRIV_SW_RESET,
9479 #ifdef CONFIG_IPW2200_MONITOR
9480 IPW_PRIV_SET_MONITOR,
9481 #endif
9484 static struct iw_priv_args ipw_priv_args[] = {
9486 .cmd = IPW_PRIV_SET_POWER,
9487 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9488 .name = "set_power"},
9490 .cmd = IPW_PRIV_GET_POWER,
9491 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9492 .name = "get_power"},
9494 .cmd = IPW_PRIV_SET_MODE,
9495 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9496 .name = "set_mode"},
9498 .cmd = IPW_PRIV_GET_MODE,
9499 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9500 .name = "get_mode"},
9502 .cmd = IPW_PRIV_SET_PREAMBLE,
9503 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9504 .name = "set_preamble"},
9506 .cmd = IPW_PRIV_GET_PREAMBLE,
9507 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9508 .name = "get_preamble"},
9510 IPW_PRIV_RESET,
9511 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9513 IPW_PRIV_SW_RESET,
9514 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9515 #ifdef CONFIG_IPW2200_MONITOR
9517 IPW_PRIV_SET_MONITOR,
9518 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9519 #endif /* CONFIG_IPW2200_MONITOR */
9522 static iw_handler ipw_priv_handler[] = {
9523 ipw_wx_set_powermode,
9524 ipw_wx_get_powermode,
9525 ipw_wx_set_wireless_mode,
9526 ipw_wx_get_wireless_mode,
9527 ipw_wx_set_preamble,
9528 ipw_wx_get_preamble,
9529 ipw_wx_reset,
9530 ipw_wx_sw_reset,
9531 #ifdef CONFIG_IPW2200_MONITOR
9532 ipw_wx_set_monitor,
9533 #endif
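/* Note (added): the wireless extensions core dispatches private ioctls by
 * index, so the order of ipw_priv_handler[] above must line up with the
 * IPW_PRIV_* enum (handler slot == cmd - SIOCIWFIRSTPRIV) and with the
 * corresponding entries in ipw_priv_args[]. */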
9536 static struct iw_handler_def ipw_wx_handler_def = {
9537 .standard = ipw_wx_handlers,
9538 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9539 .num_private = ARRAY_SIZE(ipw_priv_handler),
9540 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9541 .private = ipw_priv_handler,
9542 .private_args = ipw_priv_args,
9543 .get_wireless_stats = ipw_get_wireless_stats,
9547 * Get wireless statistics.
9548 * Called by /proc/net/wireless
9549 * Also called by SIOCGIWSTATS
9551 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9553 struct ipw_priv *priv = ieee80211_priv(dev);
9554 struct iw_statistics *wstats;
9556 wstats = &priv->wstats;
9558 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9559 * netdev->get_wireless_stats seems to be called before fw is
9560 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9561          * and associated; if not associated, the values are all meaningless
9562          * anyway, so set them all to 0 and mark them INVALID */
9563 if (!(priv->status & STATUS_ASSOCIATED)) {
9564 wstats->miss.beacon = 0;
9565 wstats->discard.retries = 0;
9566 wstats->qual.qual = 0;
9567 wstats->qual.level = 0;
9568 wstats->qual.noise = 0;
9569 wstats->qual.updated = 7;
9570 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9571 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9572 return wstats;
9575 wstats->qual.qual = priv->quality;
9576 wstats->qual.level = average_value(&priv->average_rssi);
9577 wstats->qual.noise = average_value(&priv->average_noise);
9578 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9579 IW_QUAL_NOISE_UPDATED;
9581 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9582 wstats->discard.retries = priv->last_tx_failures;
9583 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9585 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9586 goto fail_get_ordinal;
9587 wstats->discard.retries += tx_retry; */
9589 return wstats;
9592 /* net device stuff */
9594 static void init_sys_config(struct ipw_sys_config *sys_config)
9596 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9597 sys_config->bt_coexistence = 0;
9598 sys_config->answer_broadcast_ssid_probe = 0;
9599 sys_config->accept_all_data_frames = 0;
9600 sys_config->accept_non_directed_frames = 1;
9601 sys_config->exclude_unicast_unencrypted = 0;
9602 sys_config->disable_unicast_decryption = 1;
9603 sys_config->exclude_multicast_unencrypted = 0;
9604 sys_config->disable_multicast_decryption = 1;
9605 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
9606 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9607 sys_config->dot11g_auto_detection = 0;
9608 sys_config->enable_cts_to_self = 0;
9609 sys_config->bt_coexist_collision_thr = 0;
9610 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256
9613 static int ipw_net_open(struct net_device *dev)
9615 struct ipw_priv *priv = ieee80211_priv(dev);
9616 IPW_DEBUG_INFO("dev->open\n");
9617 /* we should be verifying the device is ready to be opened */
9618 mutex_lock(&priv->mutex);
9619 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9620 (priv->status & STATUS_ASSOCIATED))
9621 netif_start_queue(dev);
9622 mutex_unlock(&priv->mutex);
9623 return 0;
9626 static int ipw_net_stop(struct net_device *dev)
9628 IPW_DEBUG_INFO("dev->close\n");
9629 netif_stop_queue(dev);
9630 return 0;
9634 todo:
9636 modify to send one TFD per fragment instead of using chunking; otherwise
9637 we need to heavily modify ieee80211_skb_to_txb.
9640 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9641 int pri)
9643 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9644 txb->fragments[0]->data;
9645 int i = 0;
9646 struct tfd_frame *tfd;
9647 #ifdef CONFIG_IPW_QOS
9648 int tx_id = ipw_get_tx_queue_number(priv, pri);
9649 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9650 #else
9651 struct clx2_tx_queue *txq = &priv->txq[0];
9652 #endif
9653 struct clx2_queue *q = &txq->q;
9654 u8 id, hdr_len, unicast;
9655 u16 remaining_bytes;
9656 int fc;
9658 /* If there isn't room in the queue, we return busy and let the
9659 * network stack requeue the packet for us */
9660 if (ipw_queue_space(q) < q->high_mark)
9661 return NETDEV_TX_BUSY;
9663 switch (priv->ieee->iw_mode) {
9664 case IW_MODE_ADHOC:
9665 hdr_len = IEEE80211_3ADDR_LEN;
9666 unicast = !is_multicast_ether_addr(hdr->addr1);
9667 id = ipw_find_station(priv, hdr->addr1);
9668 if (id == IPW_INVALID_STATION) {
9669 id = ipw_add_station(priv, hdr->addr1);
9670 if (id == IPW_INVALID_STATION) {
9671 IPW_WARNING("Attempt to send data to "
9672 "invalid cell: " MAC_FMT "\n",
9673 MAC_ARG(hdr->addr1));
9674 goto drop;
9677 break;
9679 case IW_MODE_INFRA:
9680 default:
9681 unicast = !is_multicast_ether_addr(hdr->addr3);
9682 hdr_len = IEEE80211_3ADDR_LEN;
9683 id = 0;
9684 break;
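/* Note (added): in IBSS the receiver address is carried in addr1, so that is
 * what the unicast test and per-station lookup above use; in infrastructure
 * mode frames go via the AP (addr1 = BSSID) and the final destination sits
 * in addr3, hence the addr3 test in the IW_MODE_INFRA case. */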
9687 tfd = &txq->bd[q->first_empty];
9688 txq->txb[q->first_empty] = txb;
9689 memset(tfd, 0, sizeof(*tfd));
9690 tfd->u.data.station_number = id;
9692 tfd->control_flags.message_type = TX_FRAME_TYPE;
9693 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9695 tfd->u.data.cmd_id = DINO_CMD_TX;
9696 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9697 remaining_bytes = txb->payload_size;
9699 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9700 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9701 else
9702 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9704 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9705 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9707 fc = le16_to_cpu(hdr->frame_ctl);
9708 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9710 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9712 if (likely(unicast))
9713 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9715 if (txb->encrypted && !priv->ieee->host_encrypt) {
9716 switch (priv->ieee->sec.level) {
9717 case SEC_LEVEL_3:
9718 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9719 IEEE80211_FCTL_PROTECTED;
9720 /* XXX: ACK flag must be set for CCMP even if it
9721 * is a multicast/broadcast packet, because CCMP
9722 * group communication encrypted by GTK is
9723 * actually done by the AP. */
9724 if (!unicast)
9725 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9727 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9728 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9729 tfd->u.data.key_index = 0;
9730 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9731 break;
9732 case SEC_LEVEL_2:
9733 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9734 IEEE80211_FCTL_PROTECTED;
9735 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9736 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9737 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9738 break;
9739 case SEC_LEVEL_1:
9740 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9741 IEEE80211_FCTL_PROTECTED;
9742 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9743 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9745 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9746 else
9747 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9748 break;
9749 case SEC_LEVEL_0:
9750 break;
9751 default:
9752 printk(KERN_ERR "Unknow security level %d\n",
9753 priv->ieee->sec.level);
9754 break;
9756 } else
9757 /* No hardware encryption */
9758 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9760 #ifdef CONFIG_IPW_QOS
9761 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9762 #endif /* CONFIG_IPW_QOS */
9764 /* payload */
9765 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9766 txb->nr_frags));
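/* Note (added): at most NUM_TFD_CHUNKS - 2 fragments are mapped directly as
 * DMA chunks here; if the txb carries more fragments than that, the block
 * further below copies the remainder into one freshly allocated skb and
 * appends it as a single extra chunk. */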
9767 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9768 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9769 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9770 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9771 i, le32_to_cpu(tfd->u.data.num_chunks),
9772 txb->fragments[i]->len - hdr_len);
9773 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9774 i, tfd->u.data.num_chunks,
9775 txb->fragments[i]->len - hdr_len);
9776 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9777 txb->fragments[i]->len - hdr_len);
9779 tfd->u.data.chunk_ptr[i] =
9780 cpu_to_le32(pci_map_single
9781 (priv->pci_dev,
9782 txb->fragments[i]->data + hdr_len,
9783 txb->fragments[i]->len - hdr_len,
9784 PCI_DMA_TODEVICE));
9785 tfd->u.data.chunk_len[i] =
9786 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9789 if (i != txb->nr_frags) {
9790 struct sk_buff *skb;
9791 u16 remaining_bytes = 0;
9792 int j;
9794 for (j = i; j < txb->nr_frags; j++)
9795 remaining_bytes += txb->fragments[j]->len - hdr_len;
9797 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9798 remaining_bytes);
9799 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9800 if (skb != NULL) {
9801 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9802 for (j = i; j < txb->nr_frags; j++) {
9803 int size = txb->fragments[j]->len - hdr_len;
9805 printk(KERN_INFO "Adding frag %d %d...\n",
9806 j, size);
9807 memcpy(skb_put(skb, size),
9808 txb->fragments[j]->data + hdr_len, size);
9810 dev_kfree_skb_any(txb->fragments[i]);
9811 txb->fragments[i] = skb;
9812 tfd->u.data.chunk_ptr[i] =
9813 cpu_to_le32(pci_map_single
9814 (priv->pci_dev, skb->data,
9815 tfd->u.data.chunk_len[i],
9816 PCI_DMA_TODEVICE));
9818 tfd->u.data.num_chunks =
9819 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9824 /* kick DMA */
9825 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9826 ipw_write32(priv, q->reg_w, q->first_empty);
9828 return NETDEV_TX_OK;
9830 drop:
9831 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9832 ieee80211_txb_free(txb);
9833 return NETDEV_TX_OK;
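/* ieee80211 is_queue_full callback: returns 1 when the TX queue that
 * would carry this priority has less free space than its high-water
 * mark.  With CONFIG_IPW_QOS the priority selects one of the four QoS
 * queues; otherwise queue 0 is always consulted. */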
9836 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9838 struct ipw_priv *priv = ieee80211_priv(dev);
9839 #ifdef CONFIG_IPW_QOS
9840 int tx_id = ipw_get_tx_queue_number(priv, pri);
9841 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9842 #else
9843 struct clx2_tx_queue *txq = &priv->txq[0];
9844 #endif /* CONFIG_IPW_QOS */
9846 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9847 return 1;
9849 return 0;
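/* ieee80211 hard_start_xmit callback.  Runs under priv->lock with
 * interrupts disabled: if we are not associated the frame is refused
 * and the netif queue is stopped, otherwise the txb is handed to
 * ipw_tx_skb() and the activity LED is blinked on success. */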
9852 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9853 struct net_device *dev, int pri)
9855 struct ipw_priv *priv = ieee80211_priv(dev);
9856 unsigned long flags;
9857 int ret;
9859 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9860 spin_lock_irqsave(&priv->lock, flags);
9862 if (!(priv->status & STATUS_ASSOCIATED)) {
9863 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9864 priv->ieee->stats.tx_carrier_errors++;
9865 netif_stop_queue(dev);
9866 goto fail_unlock;
9869 ret = ipw_tx_skb(priv, txb, pri);
9870 if (ret == NETDEV_TX_OK)
9871 __ipw_led_activity_on(priv);
9872 spin_unlock_irqrestore(&priv->lock, flags);
9874 return ret;
9876 fail_unlock:
9877 spin_unlock_irqrestore(&priv->lock, flags);
9878 return 1;
9881 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9883 struct ipw_priv *priv = ieee80211_priv(dev);
9885 priv->ieee->stats.tx_packets = priv->tx_packets;
9886 priv->ieee->stats.rx_packets = priv->rx_packets;
9887 return &priv->ieee->stats;
9890 static void ipw_net_set_multicast_list(struct net_device *dev)
9895 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9897 struct ipw_priv *priv = ieee80211_priv(dev);
9898 struct sockaddr *addr = p;
9899 if (!is_valid_ether_addr(addr->sa_data))
9900 return -EADDRNOTAVAIL;
9901 mutex_lock(&priv->mutex);
9902 priv->config |= CFG_CUSTOM_MAC;
9903 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9904 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9905 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9906 queue_work(priv->workqueue, &priv->adapter_restart);
9907 mutex_unlock(&priv->mutex);
9908 return 0;
9911 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9912 struct ethtool_drvinfo *info)
9914 struct ipw_priv *p = ieee80211_priv(dev);
9915 char vers[64];
9916 char date[32];
9917 u32 len;
9919 strcpy(info->driver, DRV_NAME);
9920 strcpy(info->version, DRV_VERSION);
9922 len = sizeof(vers);
9923 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9924 len = sizeof(date);
9925 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9927 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9928 vers, date);
9929 strcpy(info->bus_info, pci_name(p->pci_dev));
9930 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9933 static u32 ipw_ethtool_get_link(struct net_device *dev)
9935 struct ipw_priv *priv = ieee80211_priv(dev);
9936 return (priv->status & STATUS_ASSOCIATED) != 0;
9939 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
9941 return IPW_EEPROM_IMAGE_SIZE;
9944 static int ipw_ethtool_get_eeprom(struct net_device *dev,
9945 struct ethtool_eeprom *eeprom, u8 * bytes)
9947 struct ipw_priv *p = ieee80211_priv(dev);
9949 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9950 return -EINVAL;
9951 mutex_lock(&p->mutex);
9952 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9953 mutex_unlock(&p->mutex);
9954 return 0;
9957 static int ipw_ethtool_set_eeprom(struct net_device *dev,
9958 struct ethtool_eeprom *eeprom, u8 * bytes)
9960 struct ipw_priv *p = ieee80211_priv(dev);
9961 int i;
9963 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9964 return -EINVAL;
9965 mutex_lock(&p->mutex);
9966 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9967 for (i = IPW_EEPROM_DATA;
9968 i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++)
9969 ipw_write8(p, i, p->eeprom[i]);
9970 mutex_unlock(&p->mutex);
9971 return 0;
9974 static struct ethtool_ops ipw_ethtool_ops = {
9975 .get_link = ipw_ethtool_get_link,
9976 .get_drvinfo = ipw_ethtool_get_drvinfo,
9977 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
9978 .get_eeprom = ipw_ethtool_get_eeprom,
9979 .set_eeprom = ipw_ethtool_set_eeprom,
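/* Primary interrupt handler.  The IRQ line may be shared, so bail out
 * if our interrupts are disabled, if INTA reads back 0xFFFFFFFF (the
 * card has disappeared), or if none of the asserted bits are in our
 * mask.  Otherwise mask further interrupts, ack the asserted bits,
 * cache INTA in priv->isr_inta and defer the real work to
 * ipw_irq_tasklet outside of hard-IRQ context. */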
9982 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
9984 struct ipw_priv *priv = data;
9985 u32 inta, inta_mask;
9987 if (!priv)
9988 return IRQ_NONE;
9990 spin_lock(&priv->lock);
9992 if (!(priv->status & STATUS_INT_ENABLED)) {
9993 /* Shared IRQ */
9994 goto none;
9997 inta = ipw_read32(priv, IPW_INTA_RW);
9998 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10000 if (inta == 0xFFFFFFFF) {
10001 /* Hardware disappeared */
10002 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10003 goto none;
10006 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10007 /* Shared interrupt */
10008 goto none;
10011 /* tell the device to stop sending interrupts */
10012 ipw_disable_interrupts(priv);
10014 /* ack current interrupts */
10015 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10016 ipw_write32(priv, IPW_INTA_RW, inta);
10018 /* Cache INTA value for our tasklet */
10019 priv->isr_inta = inta;
10021 tasklet_schedule(&priv->irq_tasklet);
10023 spin_unlock(&priv->lock);
10025 return IRQ_HANDLED;
10026 none:
10027 spin_unlock(&priv->lock);
10028 return IRQ_NONE;
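/* Deferred rf_kill poll.  While the hardware kill switch is still
 * active we reschedule ourselves every 2*HZ; once it clears (and the
 * software rf_kill bit is not set) the adapter restart work is queued,
 * since a full restart cannot be performed under the IRQ-safe lock
 * held here. */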
10031 static void ipw_rf_kill(void *adapter)
10033 struct ipw_priv *priv = adapter;
10034 unsigned long flags;
10036 spin_lock_irqsave(&priv->lock, flags);
10038 if (rf_kill_active(priv)) {
10039 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10040 if (priv->workqueue)
10041 queue_delayed_work(priv->workqueue,
10042 &priv->rf_kill, 2 * HZ);
10043 goto exit_unlock;
10046 /* RF Kill is now disabled, so bring the device back up */
10048 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10049 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10050 "device\n");
10052 /* we cannot do an adapter restart while inside an irq lock */
10053 queue_work(priv->workqueue, &priv->adapter_restart);
10054 } else
10055 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10056 "enabled\n");
10058 exit_unlock:
10059 spin_unlock_irqrestore(&priv->lock, flags);
10062 static void ipw_bg_rf_kill(void *data)
10064 struct ipw_priv *priv = data;
10065 mutex_lock(&priv->mutex);
10066 ipw_rf_kill(data);
10067 mutex_unlock(&priv->mutex);
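/* Association state transitions.  ipw_link_up() resets the duplicate
 * frame detection state, turns the carrier on, (re)starts the netif
 * queue, refreshes the rate statistics and sends the wireless
 * association event; ipw_link_down() does the reverse and, unless the
 * driver is unloading, queues another scan. */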
10070 static void ipw_link_up(struct ipw_priv *priv)
10072 priv->last_seq_num = -1;
10073 priv->last_frag_num = -1;
10074 priv->last_packet_time = 0;
10076 netif_carrier_on(priv->net_dev);
10077 if (netif_queue_stopped(priv->net_dev)) {
10078 IPW_DEBUG_NOTIF("waking queue\n");
10079 netif_wake_queue(priv->net_dev);
10080 } else {
10081 IPW_DEBUG_NOTIF("starting queue\n");
10082 netif_start_queue(priv->net_dev);
10085 cancel_delayed_work(&priv->request_scan);
10086 ipw_reset_stats(priv);
10087 /* Ensure the rate is updated immediately */
10088 priv->last_rate = ipw_get_current_rate(priv);
10089 ipw_gather_stats(priv);
10090 ipw_led_link_up(priv);
10091 notify_wx_assoc_event(priv);
10093 if (priv->config & CFG_BACKGROUND_SCAN)
10094 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10097 static void ipw_bg_link_up(void *data)
10099 struct ipw_priv *priv = data;
10100 mutex_lock(&priv->mutex);
10101 ipw_link_up(data);
10102 mutex_unlock(&priv->mutex);
10105 static void ipw_link_down(struct ipw_priv *priv)
10107 ipw_led_link_down(priv);
10108 netif_carrier_off(priv->net_dev);
10109 netif_stop_queue(priv->net_dev);
10110 notify_wx_assoc_event(priv);
10112 /* Cancel any queued work ... */
10113 cancel_delayed_work(&priv->request_scan);
10114 cancel_delayed_work(&priv->adhoc_check);
10115 cancel_delayed_work(&priv->gather_stats);
10117 ipw_reset_stats(priv);
10119 if (!(priv->status & STATUS_EXIT_PENDING)) {
10120 /* Queue up another scan... */
10121 queue_work(priv->workqueue, &priv->request_scan);
10125 static void ipw_bg_link_down(void *data)
10127 struct ipw_priv *priv = data;
10128 mutex_lock(&priv->mutex);
10129 ipw_link_down(data);
10130 mutex_unlock(&priv->mutex);
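/* Create the driver-private workqueue, the wait queues, every deferred
 * work item (association, scanning, LEDs, rf_kill, restart, ...) and
 * the IRQ tasklet.  Most of the work handlers are ipw_bg_* wrappers
 * that take priv->mutex around the real implementation. */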
10133 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10135 int ret = 0;
10137 priv->workqueue = create_workqueue(DRV_NAME);
10138 init_waitqueue_head(&priv->wait_command_queue);
10139 init_waitqueue_head(&priv->wait_state);
10141 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10142 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10143 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10144 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10145 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10146 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10147 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10148 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10149 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10150 INIT_WORK(&priv->request_scan,
10151 (void (*)(void *))ipw_request_scan, priv);
10152 INIT_WORK(&priv->gather_stats,
10153 (void (*)(void *))ipw_bg_gather_stats, priv);
10154 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10155 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10156 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10157 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10158 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10159 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10160 priv);
10161 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10162 priv);
10163 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10164 priv);
10165 INIT_WORK(&priv->merge_networks,
10166 (void (*)(void *))ipw_merge_adhoc_network, priv);
10168 #ifdef CONFIG_IPW_QOS
10169 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10170 priv);
10171 #endif /* CONFIG_IPW_QOS */
10173 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10174 ipw_irq_tasklet, (unsigned long)priv);
10176 return ret;
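/* ieee80211 set_security callback.  Copies the requested key material,
 * active key index, authentication mode, privacy and level flags into
 * priv->ieee->sec, mirrors them in priv->capability and sets
 * STATUS_SECURITY_UPDATED; when the firmware performs the encryption,
 * the keys are also pushed to hardware immediately. */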
10179 static void shim__set_security(struct net_device *dev,
10180 struct ieee80211_security *sec)
10182 struct ipw_priv *priv = ieee80211_priv(dev);
10183 int i;
10184 for (i = 0; i < 4; i++) {
10185 if (sec->flags & (1 << i)) {
10186 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10187 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10188 if (sec->key_sizes[i] == 0)
10189 priv->ieee->sec.flags &= ~(1 << i);
10190 else {
10191 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10192 sec->key_sizes[i]);
10193 priv->ieee->sec.flags |= (1 << i);
10195 priv->status |= STATUS_SECURITY_UPDATED;
10196 } else if (sec->level != SEC_LEVEL_1)
10197 priv->ieee->sec.flags &= ~(1 << i);
10200 if (sec->flags & SEC_ACTIVE_KEY) {
10201 if (sec->active_key <= 3) {
10202 priv->ieee->sec.active_key = sec->active_key;
10203 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10204 } else
10205 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10206 priv->status |= STATUS_SECURITY_UPDATED;
10207 } else
10208 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10210 if ((sec->flags & SEC_AUTH_MODE) &&
10211 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10212 priv->ieee->sec.auth_mode = sec->auth_mode;
10213 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10214 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10215 priv->capability |= CAP_SHARED_KEY;
10216 else
10217 priv->capability &= ~CAP_SHARED_KEY;
10218 priv->status |= STATUS_SECURITY_UPDATED;
10221 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10222 priv->ieee->sec.flags |= SEC_ENABLED;
10223 priv->ieee->sec.enabled = sec->enabled;
10224 priv->status |= STATUS_SECURITY_UPDATED;
10225 if (sec->enabled)
10226 priv->capability |= CAP_PRIVACY_ON;
10227 else
10228 priv->capability &= ~CAP_PRIVACY_ON;
10231 if (sec->flags & SEC_ENCRYPT)
10232 priv->ieee->sec.encrypt = sec->encrypt;
10234 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10235 priv->ieee->sec.level = sec->level;
10236 priv->ieee->sec.flags |= SEC_LEVEL;
10237 priv->status |= STATUS_SECURITY_UPDATED;
10240 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10241 ipw_set_hwcrypto_keys(priv);
10243 /* To match current functionality of ipw2100 (which works well w/
10244 * various supplicants), we don't force a disassociate if the
10245 * privacy capability changes ... */
10246 #if 0
10247 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10248 (((priv->assoc_request.capability &
10249 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10250 (!(priv->assoc_request.capability &
10251 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10252 IPW_DEBUG_ASSOC("Disassociating due to capability "
10253 "change.\n");
10254 ipw_disassociate(priv);
10256 #endif
10259 static int init_supported_rates(struct ipw_priv *priv,
10260 struct ipw_supported_rates *rates)
10262 /* TODO: Mask out rates based on priv->rates_mask */
10264 memset(rates, 0, sizeof(*rates));
10265 /* configure supported rates */
10266 switch (priv->ieee->freq_band) {
10267 case IEEE80211_52GHZ_BAND:
10268 rates->ieee_mode = IPW_A_MODE;
10269 rates->purpose = IPW_RATE_CAPABILITIES;
10270 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10271 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10272 break;
10274 default: /* Mixed or 2.4 GHz */
10275 rates->ieee_mode = IPW_G_MODE;
10276 rates->purpose = IPW_RATE_CAPABILITIES;
10277 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10278 IEEE80211_CCK_DEFAULT_RATES_MASK);
10279 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10280 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10281 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10283 break;
10286 return 0;
10289 static int ipw_config(struct ipw_priv *priv)
10291 /* This is only called from ipw_up, which resets/reloads the firmware,
10292 so we don't need to first disable the card before we configure
10293 it */
10294 if (ipw_set_tx_power(priv))
10295 goto error;
10297 /* initialize adapter address */
10298 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10299 goto error;
10301 /* set basic system config settings */
10302 init_sys_config(&priv->sys_config);
10304 /* Support Bluetooth if we have BT h/w on board, and the user wants to.
10305 * Does not support BT priority yet (don't abort or defer our Tx) */
10306 if (bt_coexist) {
10307 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10309 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10310 priv->sys_config.bt_coexistence
10311 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10312 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10313 priv->sys_config.bt_coexistence
10314 |= CFG_BT_COEXISTENCE_OOB;
10317 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10318 priv->sys_config.answer_broadcast_ssid_probe = 1;
10319 else
10320 priv->sys_config.answer_broadcast_ssid_probe = 0;
10322 if (ipw_send_system_config(priv, &priv->sys_config))
10323 goto error;
10325 init_supported_rates(priv, &priv->rates);
10326 if (ipw_send_supported_rates(priv, &priv->rates))
10327 goto error;
10329 /* Set request-to-send threshold */
10330 if (priv->rts_threshold) {
10331 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10332 goto error;
10334 #ifdef CONFIG_IPW_QOS
10335 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10336 ipw_qos_activate(priv, NULL);
10337 #endif /* CONFIG_IPW_QOS */
10339 if (ipw_set_random_seed(priv))
10340 goto error;
10342 /* final state transition to the RUN state */
10343 if (ipw_send_host_complete(priv))
10344 goto error;
10346 priv->status |= STATUS_INIT;
10348 ipw_led_init(priv);
10349 ipw_led_radio_on(priv);
10350 priv->notif_missed_beacons = 0;
10352 /* Set hardware WEP key if it is configured. */
10353 if ((priv->capability & CAP_PRIVACY_ON) &&
10354 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10355 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10356 ipw_set_hwcrypto_keys(priv);
10358 return 0;
10360 error:
10361 return -EIO;
10365 * NOTE:
10367 * These tables have been tested in conjunction with the
10368 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10370 * Altering these values, using them on other hardware, or in geographies
10371 * not intended for resale of the above-mentioned Intel adapters has
10372 * not been tested. */
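/* Each entry is the 3-character SKU code read from the EEPROM followed
 * by the 2.4GHz (.bg) and 5.2GHz (.a) channel lists.  A channel entry
 * is {frequency in MHz, channel number, optional flags} where the flags
 * mark passive-scan-only or 802.11b-only channels. */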
10375 static const struct ieee80211_geo ipw_geos[] = {
10376 { /* Restricted */
10377 "---",
10378 .bg_channels = 11,
10379 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10380 {2427, 4}, {2432, 5}, {2437, 6},
10381 {2442, 7}, {2447, 8}, {2452, 9},
10382 {2457, 10}, {2462, 11}},
10385 { /* Custom US/Canada */
10386 "ZZF",
10387 .bg_channels = 11,
10388 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10389 {2427, 4}, {2432, 5}, {2437, 6},
10390 {2442, 7}, {2447, 8}, {2452, 9},
10391 {2457, 10}, {2462, 11}},
10392 .a_channels = 8,
10393 .a = {{5180, 36},
10394 {5200, 40},
10395 {5220, 44},
10396 {5240, 48},
10397 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10398 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10399 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10400 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10403 { /* Rest of World */
10404 "ZZD",
10405 .bg_channels = 13,
10406 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10407 {2427, 4}, {2432, 5}, {2437, 6},
10408 {2442, 7}, {2447, 8}, {2452, 9},
10409 {2457, 10}, {2462, 11}, {2467, 12},
10410 {2472, 13}},
10413 { /* Custom USA & Europe & High */
10414 "ZZA",
10415 .bg_channels = 11,
10416 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10417 {2427, 4}, {2432, 5}, {2437, 6},
10418 {2442, 7}, {2447, 8}, {2452, 9},
10419 {2457, 10}, {2462, 11}},
10420 .a_channels = 13,
10421 .a = {{5180, 36},
10422 {5200, 40},
10423 {5220, 44},
10424 {5240, 48},
10425 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10426 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10427 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10428 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10429 {5745, 149},
10430 {5765, 153},
10431 {5785, 157},
10432 {5805, 161},
10433 {5825, 165}},
10436 { /* Custom NA & Europe */
10437 "ZZB",
10438 .bg_channels = 11,
10439 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10440 {2427, 4}, {2432, 5}, {2437, 6},
10441 {2442, 7}, {2447, 8}, {2452, 9},
10442 {2457, 10}, {2462, 11}},
10443 .a_channels = 13,
10444 .a = {{5180, 36},
10445 {5200, 40},
10446 {5220, 44},
10447 {5240, 48},
10448 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10449 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10450 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10451 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10452 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10453 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10454 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10455 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10456 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10459 { /* Custom Japan */
10460 "ZZC",
10461 .bg_channels = 11,
10462 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10463 {2427, 4}, {2432, 5}, {2437, 6},
10464 {2442, 7}, {2447, 8}, {2452, 9},
10465 {2457, 10}, {2462, 11}},
10466 .a_channels = 4,
10467 .a = {{5170, 34}, {5190, 38},
10468 {5210, 42}, {5230, 46}},
10471 { /* Custom */
10472 "ZZM",
10473 .bg_channels = 11,
10474 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10475 {2427, 4}, {2432, 5}, {2437, 6},
10476 {2442, 7}, {2447, 8}, {2452, 9},
10477 {2457, 10}, {2462, 11}},
10480 { /* Europe */
10481 "ZZE",
10482 .bg_channels = 13,
10483 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10484 {2427, 4}, {2432, 5}, {2437, 6},
10485 {2442, 7}, {2447, 8}, {2452, 9},
10486 {2457, 10}, {2462, 11}, {2467, 12},
10487 {2472, 13}},
10488 .a_channels = 19,
10489 .a = {{5180, 36},
10490 {5200, 40},
10491 {5220, 44},
10492 {5240, 48},
10493 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10494 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10495 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10496 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10497 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10498 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10499 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10500 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10501 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10502 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10503 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10504 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10505 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10506 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10507 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10510 { /* Custom Japan */
10511 "ZZJ",
10512 .bg_channels = 14,
10513 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10514 {2427, 4}, {2432, 5}, {2437, 6},
10515 {2442, 7}, {2447, 8}, {2452, 9},
10516 {2457, 10}, {2462, 11}, {2467, 12},
10517 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10518 .a_channels = 4,
10519 .a = {{5170, 34}, {5190, 38},
10520 {5210, 42}, {5230, 46}},
10523 { /* Rest of World */
10524 "ZZR",
10525 .bg_channels = 14,
10526 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10527 {2427, 4}, {2432, 5}, {2437, 6},
10528 {2442, 7}, {2447, 8}, {2452, 9},
10529 {2457, 10}, {2462, 11}, {2467, 12},
10530 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10531 IEEE80211_CH_PASSIVE_ONLY}},
10534 { /* High Band */
10535 "ZZH",
10536 .bg_channels = 13,
10537 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10538 {2427, 4}, {2432, 5}, {2437, 6},
10539 {2442, 7}, {2447, 8}, {2452, 9},
10540 {2457, 10}, {2462, 11},
10541 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10542 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10543 .a_channels = 4,
10544 .a = {{5745, 149}, {5765, 153},
10545 {5785, 157}, {5805, 161}},
10548 { /* Custom Europe */
10549 "ZZG",
10550 .bg_channels = 13,
10551 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10552 {2427, 4}, {2432, 5}, {2437, 6},
10553 {2442, 7}, {2447, 8}, {2452, 9},
10554 {2457, 10}, {2462, 11},
10555 {2467, 12}, {2472, 13}},
10556 .a_channels = 4,
10557 .a = {{5180, 36}, {5200, 40},
10558 {5220, 44}, {5240, 48}},
10561 { /* Europe */
10562 "ZZK",
10563 .bg_channels = 13,
10564 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10565 {2427, 4}, {2432, 5}, {2437, 6},
10566 {2442, 7}, {2447, 8}, {2452, 9},
10567 {2457, 10}, {2462, 11},
10568 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10569 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10570 .a_channels = 24,
10571 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10572 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10573 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10574 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10575 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10576 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10577 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10578 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10579 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10580 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10581 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10582 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10583 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10584 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10585 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10586 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10587 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10588 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10589 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10590 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10591 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10592 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10593 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10594 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10597 { /* Europe */
10598 "ZZL",
10599 .bg_channels = 11,
10600 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10601 {2427, 4}, {2432, 5}, {2437, 6},
10602 {2442, 7}, {2447, 8}, {2452, 9},
10603 {2457, 10}, {2462, 11}},
10604 .a_channels = 13,
10605 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10606 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10607 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10608 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10609 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10610 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10611 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10612 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10613 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10614 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10615 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10616 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10617 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10621 /* GEO code borrowed from ieee80211_geo.c */
10622 static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
10624 int i;
10626 /* Driver needs to initialize the geography map before using
10627 * these helper functions */
10628 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10630 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10631 for (i = 0; i < ieee->geo.bg_channels; i++)
10632 /* NOTE: If G mode is currently supported but
10633 * this is a B only channel, we don't see it
10634 * as valid. */
10635 if ((ieee->geo.bg[i].channel == channel) &&
10636 (!(ieee->mode & IEEE_G) ||
10637 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
10638 return IEEE80211_24GHZ_BAND;
10640 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10641 for (i = 0; i < ieee->geo.a_channels; i++)
10642 if (ieee->geo.a[i].channel == channel)
10643 return IEEE80211_52GHZ_BAND;
10645 return 0;
10648 static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel)
10650 int i;
10652 /* Driver needs to initialize the geography map before using
10653 * these helper functions */
10654 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10656 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10657 for (i = 0; i < ieee->geo.bg_channels; i++)
10658 if (ieee->geo.bg[i].channel == channel)
10659 return i;
10661 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10662 for (i = 0; i < ieee->geo.a_channels; i++)
10663 if (ieee->geo.a[i].channel == channel)
10664 return i;
10666 return -1;
10669 static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq)
10671 int i;
10673 /* Driver needs to initialize the geography map before using
10674 * these helper functions */
10675 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10677 freq /= 100000;
10679 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10680 for (i = 0; i < ieee->geo.bg_channels; i++)
10681 if (ieee->geo.bg[i].freq == freq)
10682 return ieee->geo.bg[i].channel;
10684 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10685 for (i = 0; i < ieee->geo.a_channels; i++)
10686 if (ieee->geo.a[i].freq == freq)
10687 return ieee->geo.a[i].channel;
10689 return 0;
10692 static int ipw_set_geo(struct ieee80211_device *ieee,
10693 const struct ieee80211_geo *geo)
10695 memcpy(ieee->geo.name, geo->name, 3);
10696 ieee->geo.name[3] = '\0';
10697 ieee->geo.bg_channels = geo->bg_channels;
10698 ieee->geo.a_channels = geo->a_channels;
10699 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
10700 sizeof(struct ieee80211_channel));
10701 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
10702 sizeof(struct ieee80211_channel));
10703 return 0;
10706 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee)
10708 return &ieee->geo;
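/* Bring the device up: allocate the optional firmware command log,
 * then try up to MAX_HW_RESTARTS times to load the microcode/firmware,
 * read the MAC address and geography (SKU) from the EEPROM, honor any
 * active rf_kill switch, and push the full configuration.  On success
 * a scan is queued so auto-association can proceed. */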
10711 #define MAX_HW_RESTARTS 5
10712 static int ipw_up(struct ipw_priv *priv)
10714 int rc, i, j;
10716 if (priv->status & STATUS_EXIT_PENDING)
10717 return -EIO;
10719 if (cmdlog && !priv->cmdlog) {
10720 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10721 GFP_KERNEL);
10722 if (priv->cmdlog == NULL) {
10723 IPW_ERROR("Error allocating %d command log entries.\n",
10724 cmdlog);
10725 } else {
10726 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10727 priv->cmdlog_len = cmdlog;
10731 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10732 /* Load the microcode, firmware, and eeprom.
10733 * Also start the clocks. */
10734 rc = ipw_load(priv);
10735 if (rc) {
10736 IPW_ERROR("Unable to load firmware: %d\n", rc);
10737 return rc;
10740 ipw_init_ordinals(priv);
10741 if (!(priv->config & CFG_CUSTOM_MAC))
10742 eeprom_parse_mac(priv, priv->mac_addr);
10743 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10745 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10746 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10747 ipw_geos[j].name, 3))
10748 break;
10750 if (j == ARRAY_SIZE(ipw_geos)) {
10751 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10752 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10753 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10754 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10755 j = 0;
10757 if (ipw_set_geo(priv->ieee, &ipw_geos[j])) {
10758 IPW_WARNING("Could not set geography.\n");
10759 return 0;
10762 IPW_DEBUG_INFO("Geography %03d [%s] detected.\n",
10763 j, priv->ieee->geo.name);
10765 if (priv->status & STATUS_RF_KILL_SW) {
10766 IPW_WARNING("Radio disabled by module parameter.\n");
10767 return 0;
10768 } else if (rf_kill_active(priv)) {
10769 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10770 "Kill switch must be turned off for "
10771 "wireless networking to work.\n");
10772 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10773 2 * HZ);
10774 return 0;
10777 rc = ipw_config(priv);
10778 if (!rc) {
10779 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10781 /* If configured to try and auto-associate, kick
10782 * off a scan. */
10783 queue_work(priv->workqueue, &priv->request_scan);
10785 return 0;
10788 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10789 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10790 i, MAX_HW_RESTARTS);
10792 /* We had an error bringing up the hardware, so take it
10793 * all the way back down so we can try again */
10794 ipw_down(priv);
10797 /* tried to restart and configure the device for as long as our
10798 * patience could withstand */
10799 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10801 return -EIO;
10804 static void ipw_bg_up(void *data)
10806 struct ipw_priv *priv = data;
10807 mutex_lock(&priv->mutex);
10808 ipw_up(data);
10809 mutex_unlock(&priv->mutex);
10812 static void ipw_deinit(struct ipw_priv *priv)
10814 int i;
10816 if (priv->status & STATUS_SCANNING) {
10817 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10818 ipw_abort_scan(priv);
10821 if (priv->status & STATUS_ASSOCIATED) {
10822 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10823 ipw_disassociate(priv);
10826 ipw_led_shutdown(priv);
10828 /* Wait up to 1s for status to change to not scanning and not
10829 * associated (disassociation can take a while for a full 802.11
10830 * exchange) */
10831 for (i = 1000; i && (priv->status &
10832 (STATUS_DISASSOCIATING |
10833 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10834 udelay(10);
10836 if (priv->status & (STATUS_DISASSOCIATING |
10837 STATUS_ASSOCIATED | STATUS_SCANNING))
10838 IPW_DEBUG_INFO("Still associated or scanning...\n");
10839 else
10840 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10842 /* Attempt to disable the card */
10843 ipw_send_card_disable(priv, 0);
10845 priv->status &= ~STATUS_INIT;
10848 static void ipw_down(struct ipw_priv *priv)
10850 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10852 priv->status |= STATUS_EXIT_PENDING;
10854 if (ipw_is_init(priv))
10855 ipw_deinit(priv);
10857 /* Wipe out the EXIT_PENDING status bit if we are not actually
10858 * exiting the module */
10859 if (!exit_pending)
10860 priv->status &= ~STATUS_EXIT_PENDING;
10862 /* tell the device to stop sending interrupts */
10863 ipw_disable_interrupts(priv);
10865 /* Clear all bits but the RF Kill */
10866 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10867 netif_carrier_off(priv->net_dev);
10868 netif_stop_queue(priv->net_dev);
10870 ipw_stop_nic(priv);
10872 ipw_led_radio_off(priv);
10875 static void ipw_bg_down(void *data)
10877 struct ipw_priv *priv = data;
10878 mutex_lock(&priv->mutex);
10879 ipw_down(data);
10880 mutex_unlock(&priv->mutex);
10883 /* Called by register_netdev() */
10884 static int ipw_net_init(struct net_device *dev)
10886 struct ipw_priv *priv = ieee80211_priv(dev);
10887 mutex_lock(&priv->mutex);
10889 if (ipw_up(priv)) {
10890 mutex_unlock(&priv->mutex);
10891 return -EIO;
10894 mutex_unlock(&priv->mutex);
10895 return 0;
10898 /* PCI driver stuff */
10899 static struct pci_device_id card_ids[] = {
10900 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10901 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10902 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10903 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10904 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10905 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10906 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10907 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10908 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10909 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10910 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10911 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10912 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10913 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10914 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10915 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10916 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10917 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10918 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10919 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10920 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10921 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10923 /* required last entry */
10924 {0,}
10927 MODULE_DEVICE_TABLE(pci, card_ids);
10929 static struct attribute *ipw_sysfs_entries[] = {
10930 &dev_attr_rf_kill.attr,
10931 &dev_attr_direct_dword.attr,
10932 &dev_attr_indirect_byte.attr,
10933 &dev_attr_indirect_dword.attr,
10934 &dev_attr_mem_gpio_reg.attr,
10935 &dev_attr_command_event_reg.attr,
10936 &dev_attr_nic_type.attr,
10937 &dev_attr_status.attr,
10938 &dev_attr_cfg.attr,
10939 &dev_attr_error.attr,
10940 &dev_attr_event_log.attr,
10941 &dev_attr_cmd_log.attr,
10942 &dev_attr_eeprom_delay.attr,
10943 &dev_attr_ucode_version.attr,
10944 &dev_attr_rtc.attr,
10945 &dev_attr_scan_age.attr,
10946 &dev_attr_led.attr,
10947 &dev_attr_speed_scan.attr,
10948 &dev_attr_net_stats.attr,
10949 NULL
10952 static struct attribute_group ipw_attribute_group = {
10953 .name = NULL, /* put in device directory */
10954 .attrs = ipw_sysfs_entries,
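/* PCI probe: allocate the ieee80211/net_device pair, enable the device
 * with a 32-bit DMA mask, map BAR 0, clear the PCI RETRY_TIMEOUT
 * register, set up deferred work and the shared IRQ, wire up the
 * netdev, wireless-extensions and ethtool callbacks, create the sysfs
 * attribute group and finally register the network device.  Error
 * paths unwind in reverse order through the out_* labels. */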
10957 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10959 int err = 0;
10960 struct net_device *net_dev;
10961 void __iomem *base;
10962 u32 length, val;
10963 struct ipw_priv *priv;
10964 int i;
10966 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
10967 if (net_dev == NULL) {
10968 err = -ENOMEM;
10969 goto out;
10972 priv = ieee80211_priv(net_dev);
10973 priv->ieee = netdev_priv(net_dev);
10975 priv->net_dev = net_dev;
10976 priv->pci_dev = pdev;
10977 #ifdef CONFIG_IPW2200_DEBUG
10978 ipw_debug_level = debug;
10979 #endif
10980 spin_lock_init(&priv->lock);
10981 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10982 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10984 mutex_init(&priv->mutex);
10985 if (pci_enable_device(pdev)) {
10986 err = -ENODEV;
10987 goto out_free_ieee80211;
10990 pci_set_master(pdev);
10992 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10993 if (!err)
10994 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
10995 if (err) {
10996 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
10997 goto out_pci_disable_device;
11000 pci_set_drvdata(pdev, priv);
11002 err = pci_request_regions(pdev, DRV_NAME);
11003 if (err)
11004 goto out_pci_disable_device;
11006 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11007 * PCI Tx retries from interfering with C3 CPU state */
11008 pci_read_config_dword(pdev, 0x40, &val);
11009 if ((val & 0x0000ff00) != 0)
11010 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11012 length = pci_resource_len(pdev, 0);
11013 priv->hw_len = length;
11015 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11016 if (!base) {
11017 err = -ENODEV;
11018 goto out_pci_release_regions;
11021 priv->hw_base = base;
11022 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11023 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11025 err = ipw_setup_deferred_work(priv);
11026 if (err) {
11027 IPW_ERROR("Unable to setup deferred work\n");
11028 goto out_iounmap;
11031 ipw_sw_reset(priv, 1);
11033 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
11034 if (err) {
11035 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11036 goto out_destroy_workqueue;
11039 SET_MODULE_OWNER(net_dev);
11040 SET_NETDEV_DEV(net_dev, &pdev->dev);
11042 mutex_lock(&priv->mutex);
11044 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11045 priv->ieee->set_security = shim__set_security;
11046 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11048 #ifdef CONFIG_IPW_QOS
11049 priv->ieee->handle_probe_response = ipw_handle_beacon;
11050 priv->ieee->handle_beacon = ipw_handle_probe_response;
11051 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11052 #endif /* CONFIG_IPW_QOS */
11054 priv->ieee->perfect_rssi = -20;
11055 priv->ieee->worst_rssi = -85;
11057 net_dev->open = ipw_net_open;
11058 net_dev->stop = ipw_net_stop;
11059 net_dev->init = ipw_net_init;
11060 net_dev->get_stats = ipw_net_get_stats;
11061 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11062 net_dev->set_mac_address = ipw_net_set_mac_address;
11063 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11064 net_dev->wireless_data = &priv->wireless_data;
11065 net_dev->wireless_handlers = &ipw_wx_handler_def;
11066 net_dev->ethtool_ops = &ipw_ethtool_ops;
11067 net_dev->irq = pdev->irq;
11068 net_dev->base_addr = (unsigned long)priv->hw_base;
11069 net_dev->mem_start = pci_resource_start(pdev, 0);
11070 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11072 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11073 if (err) {
11074 IPW_ERROR("failed to create sysfs device attributes\n");
11075 mutex_unlock(&priv->mutex);
11076 goto out_release_irq;
11079 mutex_unlock(&priv->mutex);
11080 err = register_netdev(net_dev);
11081 if (err) {
11082 IPW_ERROR("failed to register network device\n");
11083 goto out_remove_sysfs;
11085 return 0;
11087 out_remove_sysfs:
11088 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11089 out_release_irq:
11090 free_irq(pdev->irq, priv);
11091 out_destroy_workqueue:
11092 destroy_workqueue(priv->workqueue);
11093 priv->workqueue = NULL;
11094 out_iounmap:
11095 iounmap(priv->hw_base);
11096 out_pci_release_regions:
11097 pci_release_regions(pdev);
11098 out_pci_disable_device:
11099 pci_disable_device(pdev);
11100 pci_set_drvdata(pdev, NULL);
11101 out_free_ieee80211:
11102 free_ieee80211(priv->net_dev);
11103 out:
11104 return err;
11107 static void ipw_pci_remove(struct pci_dev *pdev)
11109 struct ipw_priv *priv = pci_get_drvdata(pdev);
11110 struct list_head *p, *q;
11111 int i;
11113 if (!priv)
11114 return;
11116 mutex_lock(&priv->mutex);
11118 priv->status |= STATUS_EXIT_PENDING;
11119 ipw_down(priv);
11120 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11122 mutex_unlock(&priv->mutex);
11124 unregister_netdev(priv->net_dev);
11126 if (priv->rxq) {
11127 ipw_rx_queue_free(priv, priv->rxq);
11128 priv->rxq = NULL;
11130 ipw_tx_queue_free(priv);
11132 if (priv->cmdlog) {
11133 kfree(priv->cmdlog);
11134 priv->cmdlog = NULL;
11136 /* ipw_down will ensure that there is no more pending work
11137 * in the workqueues, so we can safely remove them now. */
11138 cancel_delayed_work(&priv->adhoc_check);
11139 cancel_delayed_work(&priv->gather_stats);
11140 cancel_delayed_work(&priv->request_scan);
11141 cancel_delayed_work(&priv->rf_kill);
11142 cancel_delayed_work(&priv->scan_check);
11143 destroy_workqueue(priv->workqueue);
11144 priv->workqueue = NULL;
11146 /* Free MAC hash list for ADHOC */
11147 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11148 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11149 list_del(p);
11150 kfree(list_entry(p, struct ipw_ibss_seq, list));
11154 if (priv->error) {
11155 ipw_free_error_log(priv->error);
11156 priv->error = NULL;
11159 free_irq(pdev->irq, priv);
11160 iounmap(priv->hw_base);
11161 pci_release_regions(pdev);
11162 pci_disable_device(pdev);
11163 pci_set_drvdata(pdev, NULL);
11164 free_ieee80211(priv->net_dev);
11165 free_firmware();
11168 #ifdef CONFIG_PM
11169 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11171 struct ipw_priv *priv = pci_get_drvdata(pdev);
11172 struct net_device *dev = priv->net_dev;
11174 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11176 /* Take down the device; powers it off, etc. */
11177 ipw_down(priv);
11179 /* Remove the PRESENT state of the device */
11180 netif_device_detach(dev);
11182 pci_save_state(pdev);
11183 pci_disable_device(pdev);
11184 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11186 return 0;
11189 static int ipw_pci_resume(struct pci_dev *pdev)
11191 struct ipw_priv *priv = pci_get_drvdata(pdev);
11192 struct net_device *dev = priv->net_dev;
11193 u32 val;
11195 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11197 pci_set_power_state(pdev, PCI_D0);
11198 pci_enable_device(pdev);
11199 pci_restore_state(pdev);
11202 * Suspend/Resume resets the PCI configuration space, so we have to
11203 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11204 * from interfering with C3 CPU state. pci_restore_state won't help
11205 * here since it only restores the first 64 bytes of the PCI config header.
11207 pci_read_config_dword(pdev, 0x40, &val);
11208 if ((val & 0x0000ff00) != 0)
11209 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11211 /* Set the device back into the PRESENT state; this will also wake
11212 * the queue if needed */
11213 netif_device_attach(dev);
11215 /* Bring the device back up */
11216 queue_work(priv->workqueue, &priv->up);
11218 return 0;
11220 #endif
11222 /* driver initialization stuff */
11223 static struct pci_driver ipw_driver = {
11224 .name = DRV_NAME,
11225 .id_table = card_ids,
11226 .probe = ipw_pci_probe,
11227 .remove = __devexit_p(ipw_pci_remove),
11228 #ifdef CONFIG_PM
11229 .suspend = ipw_pci_suspend,
11230 .resume = ipw_pci_resume,
11231 #endif
11234 static int __init ipw_init(void)
11236 int ret;
11238 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11239 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11241 ret = pci_module_init(&ipw_driver);
11242 if (ret) {
11243 IPW_ERROR("Unable to initialize PCI module\n");
11244 return ret;
11247 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11248 if (ret) {
11249 IPW_ERROR("Unable to create driver sysfs file\n");
11250 pci_unregister_driver(&ipw_driver);
11251 return ret;
11254 return ret;
11257 static void __exit ipw_exit(void)
11259 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11260 pci_unregister_driver(&ipw_driver);
11263 module_param(disable, int, 0444);
11264 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11266 module_param(associate, int, 0444);
11267 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11269 module_param(auto_create, int, 0444);
11270 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11272 module_param(led, int, 0444);
11273 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11275 module_param(debug, int, 0444);
11276 MODULE_PARM_DESC(debug, "debug output mask");
11278 module_param(channel, int, 0444);
11279 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
11281 #ifdef CONFIG_IPW_QOS
11282 module_param(qos_enable, int, 0444);
11283 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11285 module_param(qos_burst_enable, int, 0444);
11286 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11288 module_param(qos_no_ack_mask, int, 0444);
11289 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11291 module_param(burst_duration_CCK, int, 0444);
11292 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11294 module_param(burst_duration_OFDM, int, 0444);
11295 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11296 #endif /* CONFIG_IPW_QOS */
11298 #ifdef CONFIG_IPW2200_MONITOR
11299 module_param(mode, int, 0444);
11300 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11301 #else
11302 module_param(mode, int, 0444);
11303 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11304 #endif
11306 module_param(bt_coexist, int, 0444);
11307 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11309 module_param(hwcrypto, int, 0444);
11310 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11312 module_param(cmdlog, int, 0444);
11313 MODULE_PARM_DESC(cmdlog,
11314 "allocate a ring buffer for logging firmware commands");
11316 module_param(roaming, int, 0444);
11317 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
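/* Example module load (illustrative values only):
 *   modprobe ipw2200 mode=1 channel=6 hwcrypto=1 led=1
 * brings the interface up in IBSS mode, limits association to channel
 * 6, and enables firmware crypto and LED control. */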
11319 module_exit(ipw_exit);
11320 module_init(ipw_init);