drivers/net/wireless/ipw2200.c
1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
36 #define IPW2200_VERSION "git-1.1.1"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int bt_coexist = 0;
59 static int hwcrypto = 0;
60 static int roaming = 1;
61 static const char ipw_modes[] = {
62 'a', 'b', 'g', '?'
65 #ifdef CONFIG_IPW_QOS
66 static int qos_enable = 0;
67 static int qos_burst_enable = 0;
68 static int qos_no_ack_mask = 0;
69 static int burst_duration_CCK = 0;
70 static int burst_duration_OFDM = 0;
72 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
73 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
74 QOS_TX3_CW_MIN_OFDM},
75 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
76 QOS_TX3_CW_MAX_OFDM},
77 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
78 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
79 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
80 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
83 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
84 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
85 QOS_TX3_CW_MIN_CCK},
86 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
87 QOS_TX3_CW_MAX_CCK},
88 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
89 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
90 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
91 QOS_TX3_TXOP_LIMIT_CCK}
94 static struct ieee80211_qos_parameters def_parameters_OFDM = {
95 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
96 DEF_TX3_CW_MIN_OFDM},
97 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
98 DEF_TX3_CW_MAX_OFDM},
99 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
100 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
101 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
102 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
105 static struct ieee80211_qos_parameters def_parameters_CCK = {
106 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
107 DEF_TX3_CW_MIN_CCK},
108 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
109 DEF_TX3_CW_MAX_CCK},
110 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
111 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
112 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
113 DEF_TX3_TXOP_LIMIT_CCK}
116 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
118 static int from_priority_to_tx_queue[] = {
119 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
120 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
123 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
125 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
126 *qos_param);
127 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
128 *qos_param);
129 #endif /* CONFIG_IPW_QOS */
131 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
132 static void ipw_remove_current_network(struct ipw_priv *priv);
133 static void ipw_rx(struct ipw_priv *priv);
134 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
135 struct clx2_tx_queue *txq, int qindex);
136 static int ipw_queue_reset(struct ipw_priv *priv);
138 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
139 int len, int sync);
141 static void ipw_tx_queue_free(struct ipw_priv *);
143 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
144 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
145 static void ipw_rx_queue_replenish(void *);
146 static int ipw_up(struct ipw_priv *);
147 static void ipw_bg_up(void *);
148 static void ipw_down(struct ipw_priv *);
149 static void ipw_bg_down(void *);
150 static int ipw_config(struct ipw_priv *);
151 static int init_supported_rates(struct ipw_priv *priv,
152 struct ipw_supported_rates *prates);
153 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
154 static void ipw_send_wep_keys(struct ipw_priv *, int);
156 static int snprint_line(char *buf, size_t count,
157 const u8 * data, u32 len, u32 ofs)
159 int out, i, j, l;
160 char c;
162 out = snprintf(buf, count, "%08X", ofs);
164 for (l = 0, i = 0; i < 2; i++) {
165 out += snprintf(buf + out, count - out, " ");
166 for (j = 0; j < 8 && l < len; j++, l++)
167 out += snprintf(buf + out, count - out, "%02X ",
168 data[(i * 8 + j)]);
169 for (; j < 8; j++)
170 out += snprintf(buf + out, count - out, " ");
173 out += snprintf(buf + out, count - out, " ");
174 for (l = 0, i = 0; i < 2; i++) {
175 out += snprintf(buf + out, count - out, " ");
176 for (j = 0; j < 8 && l < len; j++, l++) {
177 c = data[(i * 8 + j)];
178 if (!isascii(c) || !isprint(c))
179 c = '.';
181 out += snprintf(buf + out, count - out, "%c", c);
184 for (; j < 8; j++)
185 out += snprintf(buf + out, count - out, " ");
188 return out;
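/*
 * Illustrative note (not driver logic): each line produced by snprint_line()
 * has the form "<offset> <8 hex bytes>  <8 hex bytes>   <ascii> <ascii>",
 * with non-printable bytes rendered as '.'.  For the 16 bytes of
 * "Hello, world.123" at offset 0 the output is roughly:
 *
 *   00000000 48 65 6C 6C 6F 2C 20 77  6F 72 6C 64 2E 31 32 33   Hello, w orld.123
 */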
191 static void printk_buf(int level, const u8 * data, u32 len)
193 char line[81];
194 u32 ofs = 0;
195 if (!(ipw_debug_level & level))
196 return;
198 while (len) {
199 snprint_line(line, sizeof(line), &data[ofs],
200 min(len, 16U), ofs);
201 printk(KERN_DEBUG "%s\n", line);
202 ofs += 16;
203 len -= min(len, 16U);
207 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
209 size_t out = size;
210 u32 ofs = 0;
211 int total = 0;
213 while (size && len) {
214 out = snprint_line(output, size, &data[ofs],
215 min_t(size_t, len, 16U), ofs);
217 ofs += 16;
218 output += out;
219 size -= out;
220 len -= min_t(size_t, len, 16U);
221 total += out;
223 return total;
226 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
227 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
228 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
230 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
231 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
232 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
234 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
235 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
236 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
238 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
239 __LINE__, (u32) (b), (u32) (c));
240 _ipw_write_reg8(a, b, c);
243 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
244 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
245 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
247 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
248 __LINE__, (u32) (b), (u32) (c));
249 _ipw_write_reg16(a, b, c);
252 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
253 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
254 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
256 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
257 __LINE__, (u32) (b), (u32) (c));
258 _ipw_write_reg32(a, b, c);
261 /* 8-bit direct write (low 4K) */
262 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
264 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
265 #define ipw_write8(ipw, ofs, val) \
266 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
267 _ipw_write8(ipw, ofs, val)
269 /* 16-bit direct write (low 4K) */
270 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
272 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
273 #define ipw_write16(ipw, ofs, val) \
274 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
275 _ipw_write16(ipw, ofs, val)
277 /* 32-bit direct write (low 4K) */
278 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
280 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
281 #define ipw_write32(ipw, ofs, val) \
282 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
283 _ipw_write32(ipw, ofs, val)
285 /* 8-bit direct read (low 4K) */
286 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
288 /* 8-bit direct read (low 4K), with debug wrapper */
289 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
291 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
292 return _ipw_read8(ipw, ofs);
295 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
296 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
298 /* 16-bit direct read (low 4K) */
299 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
301 /* 16-bit direct read (low 4K), with debug wrapper */
302 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
304 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
305 return _ipw_read16(ipw, ofs);
308 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
309 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
311 /* 32-bit direct read (low 4K) */
312 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
314 /* 32-bit direct read (low 4K), with debug wrapper */
315 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
317 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
318 return _ipw_read32(ipw, ofs);
321 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
322 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
324 /* multi-byte read (above 4K), with debug wrapper */
325 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
326 static inline void __ipw_read_indirect(const char *f, int l,
327 struct ipw_priv *a, u32 b, u8 * c, int d)
329 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
331 _ipw_read_indirect(a, b, c, d);
334 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
335 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
337 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
338 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
339 int num);
340 #define ipw_write_indirect(a, b, c, d) \
341 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
342 _ipw_write_indirect(a, b, c, d)
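/*
 * Overview of the access helpers above (illustrative summary, not extra
 * driver logic): _ipw_read8/16/32 and _ipw_write8/16/32 touch the first 4K
 * of the SRAM/register window directly through priv->hw_base, while the
 * *_reg* variants reach addresses above 4K indirectly by latching the target
 * address first, conceptually:
 *
 *   _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
 *   val = _ipw_read32(priv, IPW_INDIRECT_DATA);
 *
 * The ipw_* wrappers and macros only add IPW_DEBUG_IO tracing around these.
 */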
344 /* 32-bit indirect write (above 4K) */
345 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
347 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
348 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
349 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
352 /* 8-bit indirect write (above 4K) */
353 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
355 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
356 u32 dif_len = reg - aligned_addr;
358 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
359 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
360 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
363 /* 16-bit indirect write (above 4K) */
364 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
366 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
367 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
369 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
370 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
371 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
374 /* 8-bit indirect read (above 4K) */
375 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
377 u32 word;
378 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
379 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
380 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
381 return (word >> ((reg & 0x3) * 8)) & 0xff;
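/*
 * Worked example for the byte extraction above (illustrative only): a read
 * of reg = 0x00030003 fetches the dword at the aligned address 0x00030000;
 * (reg & 0x3) == 3 then selects the most significant byte of that dword,
 * i.e. (word >> 24) & 0xff.
 */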
384 /* 32-bit indirect read (above 4K) */
385 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
387 u32 value;
389 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
393 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
394 return value;
397 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
398 /* for area above 1st 4K of SRAM/reg space */
399 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
400 int num)
402 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
403 u32 dif_len = addr - aligned_addr;
404 u32 i;
406 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
408 if (num <= 0) {
409 return;
412 /* Read the first dword (or portion) byte by byte */
413 if (unlikely(dif_len)) {
414 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
415 /* Start reading at aligned_addr + dif_len */
416 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
417 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
418 aligned_addr += 4;
421 /* Read all of the middle dwords as dwords, with auto-increment */
422 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
423 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
424 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
426 /* Read the last dword (or portion) byte by byte */
427 if (unlikely(num)) {
428 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
429 for (i = 0; num > 0; i++, num--)
430 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
434 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
435 /* for area above 1st 4K of SRAM/reg space */
436 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
437 int num)
439 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
440 u32 dif_len = addr - aligned_addr;
441 u32 i;
443 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
445 if (num <= 0) {
446 return;
449 /* Write the first dword (or portion) byte by byte */
450 if (unlikely(dif_len)) {
451 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
452 /* Start writing at aligned_addr + dif_len */
453 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
454 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
455 aligned_addr += 4;
458 /* Write all of the middle dwords as dwords, with auto-increment */
459 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
460 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
461 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
463 /* Write the last dword (or portion) byte by byte */
464 if (unlikely(num)) {
465 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
466 for (i = 0; num > 0; i++, num--, buf++)
467 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
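/*
 * Worked example for the unaligned path above (illustrative only): with
 * addr = 0x00030002 and num = 10, dif_len is 2, so two leading bytes are
 * written individually through IPW_INDIRECT_DATA + 2/3, the next eight
 * bytes go out as two auto-incremented dwords via IPW_AUTOINC_DATA, and
 * no trailing bytes remain.
 */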
471 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
472 /* for 1st 4K of SRAM/regs space */
473 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
474 int num)
476 memcpy_toio((priv->hw_base + addr), buf, num);
479 /* Set bit(s) in low 4K of SRAM/regs */
480 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
482 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
485 /* Clear bit(s) in low 4K of SRAM/regs */
486 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
488 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
491 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
493 if (priv->status & STATUS_INT_ENABLED)
494 return;
495 priv->status |= STATUS_INT_ENABLED;
496 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
499 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
501 if (!(priv->status & STATUS_INT_ENABLED))
502 return;
503 priv->status &= ~STATUS_INT_ENABLED;
504 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
507 #ifdef CONFIG_IPW2200_DEBUG
508 static char *ipw_error_desc(u32 val)
510 switch (val) {
511 case IPW_FW_ERROR_OK:
512 return "ERROR_OK";
513 case IPW_FW_ERROR_FAIL:
514 return "ERROR_FAIL";
515 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
516 return "MEMORY_UNDERFLOW";
517 case IPW_FW_ERROR_MEMORY_OVERFLOW:
518 return "MEMORY_OVERFLOW";
519 case IPW_FW_ERROR_BAD_PARAM:
520 return "BAD_PARAM";
521 case IPW_FW_ERROR_BAD_CHECKSUM:
522 return "BAD_CHECKSUM";
523 case IPW_FW_ERROR_NMI_INTERRUPT:
524 return "NMI_INTERRUPT";
525 case IPW_FW_ERROR_BAD_DATABASE:
526 return "BAD_DATABASE";
527 case IPW_FW_ERROR_ALLOC_FAIL:
528 return "ALLOC_FAIL";
529 case IPW_FW_ERROR_DMA_UNDERRUN:
530 return "DMA_UNDERRUN";
531 case IPW_FW_ERROR_DMA_STATUS:
532 return "DMA_STATUS";
533 case IPW_FW_ERROR_DINO_ERROR:
534 return "DINO_ERROR";
535 case IPW_FW_ERROR_EEPROM_ERROR:
536 return "EEPROM_ERROR";
537 case IPW_FW_ERROR_SYSASSERT:
538 return "SYSASSERT";
539 case IPW_FW_ERROR_FATAL_ERROR:
540 return "FATAL_ERROR";
541 default:
542 return "UNKNOWN_ERROR";
546 static void ipw_dump_error_log(struct ipw_priv *priv,
547 struct ipw_fw_error *error)
549 u32 i;
551 if (!error) {
552 IPW_ERROR("Error allocating and capturing error log. "
553 "Nothing to dump.\n");
554 return;
557 IPW_ERROR("Start IPW Error Log Dump:\n");
558 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
559 error->status, error->config);
561 for (i = 0; i < error->elem_len; i++)
562 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
563 ipw_error_desc(error->elem[i].desc),
564 error->elem[i].time,
565 error->elem[i].blink1,
566 error->elem[i].blink2,
567 error->elem[i].link1,
568 error->elem[i].link2, error->elem[i].data);
569 for (i = 0; i < error->log_len; i++)
570 IPW_ERROR("%i\t0x%08x\t%i\n",
571 error->log[i].time,
572 error->log[i].data, error->log[i].event);
574 #endif
576 static inline int ipw_is_init(struct ipw_priv *priv)
578 return (priv->status & STATUS_INIT) ? 1 : 0;
581 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
583 u32 addr, field_info, field_len, field_count, total_len;
585 IPW_DEBUG_ORD("ordinal = %i\n", ord);
587 if (!priv || !val || !len) {
588 IPW_DEBUG_ORD("Invalid argument\n");
589 return -EINVAL;
592 /* verify device ordinal tables have been initialized */
593 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
594 IPW_DEBUG_ORD("Access ordinals before initialization\n");
595 return -EINVAL;
598 switch (IPW_ORD_TABLE_ID_MASK & ord) {
599 case IPW_ORD_TABLE_0_MASK:
601 * TABLE 0: Direct access to a table of 32 bit values
603 * This is a very simple table with the data directly
604 * read from the table
607 /* remove the table id from the ordinal */
608 ord &= IPW_ORD_TABLE_VALUE_MASK;
610 /* boundary check */
611 if (ord > priv->table0_len) {
612 IPW_DEBUG_ORD("ordinal value (%i) longer than "
613 "max (%i)\n", ord, priv->table0_len);
614 return -EINVAL;
617 /* verify we have enough room to store the value */
618 if (*len < sizeof(u32)) {
619 IPW_DEBUG_ORD("ordinal buffer length too small, "
620 "need %zd\n", sizeof(u32));
621 return -EINVAL;
624 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
625 ord, priv->table0_addr + (ord << 2));
627 *len = sizeof(u32);
628 ord <<= 2;
629 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
630 break;
632 case IPW_ORD_TABLE_1_MASK:
634 * TABLE 1: Indirect access to a table of 32 bit values
636 * This is a fairly large table of u32 values each
637 * representing starting addr for the data (which is
638 * also a u32)
641 /* remove the table id from the ordinal */
642 ord &= IPW_ORD_TABLE_VALUE_MASK;
644 /* boundary check */
645 if (ord > priv->table1_len) {
646 IPW_DEBUG_ORD("ordinal value too long\n");
647 return -EINVAL;
650 /* verify we have enough room to store the value */
651 if (*len < sizeof(u32)) {
652 IPW_DEBUG_ORD("ordinal buffer length too small, "
653 "need %zd\n", sizeof(u32));
654 return -EINVAL;
657 *((u32 *) val) =
658 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
659 *len = sizeof(u32);
660 break;
662 case IPW_ORD_TABLE_2_MASK:
664 * TABLE 2: Indirect access to a table of variable sized values
666 * This table consists of six values, each containing
667 * - dword containing the starting offset of the data
668 * - dword containing the length in the first 16bits
669 * and the count in the second 16bits
672 /* remove the table id from the ordinal */
673 ord &= IPW_ORD_TABLE_VALUE_MASK;
675 /* boundary check */
676 if (ord > priv->table2_len) {
677 IPW_DEBUG_ORD("ordinal value too long\n");
678 return -EINVAL;
681 /* get the address of statistic */
682 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
684 /* get the second DW of statistics ;
685 * two 16-bit words - first is length, second is count */
686 field_info =
687 ipw_read_reg32(priv,
688 priv->table2_addr + (ord << 3) +
689 sizeof(u32));
691 /* get each entry length */
692 field_len = *((u16 *) & field_info);
694 /* get number of entries */
695 field_count = *(((u16 *) & field_info) + 1);
697 /* abort if not enough memory */
698 total_len = field_len * field_count;
699 if (total_len > *len) {
700 *len = total_len;
701 return -EINVAL;
704 *len = total_len;
705 if (!total_len)
706 return 0;
708 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
709 "field_info = 0x%08x\n",
710 addr, total_len, field_info);
711 ipw_read_indirect(priv, addr, val, total_len);
712 break;
714 default:
715 IPW_DEBUG_ORD("Invalid ordinal!\n");
716 return -EINVAL;
720 return 0;
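/*
 * Example of the TABLE 2 packing handled above (illustrative only, assuming
 * a little-endian host for the u16 casts): field_info = 0x00080004 describes
 * entries of 4 bytes each (low 16 bits) with a count of 8 (high 16 bits),
 * so total_len is 32 and the caller's buffer must be at least that large.
 */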
723 static void ipw_init_ordinals(struct ipw_priv *priv)
725 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
726 priv->table0_len = ipw_read32(priv, priv->table0_addr);
728 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
729 priv->table0_addr, priv->table0_len);
731 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
732 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
734 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
735 priv->table1_addr, priv->table1_len);
737 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
738 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
739 priv->table2_len &= 0x0000ffff; /* use first two bytes */
741 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
742 priv->table2_addr, priv->table2_len);
746 static u32 ipw_register_toggle(u32 reg)
748 reg &= ~IPW_START_STANDBY;
749 if (reg & IPW_GATE_ODMA)
750 reg &= ~IPW_GATE_ODMA;
751 if (reg & IPW_GATE_IDMA)
752 reg &= ~IPW_GATE_IDMA;
753 if (reg & IPW_GATE_ADMA)
754 reg &= ~IPW_GATE_ADMA;
755 return reg;
759 * LED behavior:
760 * - On radio ON, turn on any LEDs that need to be on during start
761 * - On initialization, start unassociated blink
762 * - On association, disable unassociated blink
763 * - On disassociation, start unassociated blink
764 * - On radio OFF, turn off any LEDs started during radio on
767 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
768 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
769 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
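/*
 * With the values above, the unassociated blink is roughly a 3 second cycle:
 * the link LED is scheduled off 300 ms after being turned on
 * (LD_TIME_LINK_ON) and back on 2700 ms later (LD_TIME_LINK_OFF), while the
 * activity LED is switched off 250 ms after the last activity
 * (LD_TIME_ACT_ON).
 */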
771 static void ipw_led_link_on(struct ipw_priv *priv)
773 unsigned long flags;
774 u32 led;
776 /* If configured to not use LEDs, or nic_type is 1,
777 * then we don't toggle a LINK led */
778 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
779 return;
781 spin_lock_irqsave(&priv->lock, flags);
783 if (!(priv->status & STATUS_RF_KILL_MASK) &&
784 !(priv->status & STATUS_LED_LINK_ON)) {
785 IPW_DEBUG_LED("Link LED On\n");
786 led = ipw_read_reg32(priv, IPW_EVENT_REG);
787 led |= priv->led_association_on;
789 led = ipw_register_toggle(led);
791 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
792 ipw_write_reg32(priv, IPW_EVENT_REG, led);
794 priv->status |= STATUS_LED_LINK_ON;
796 /* If we aren't associated, schedule turning the LED off */
797 if (!(priv->status & STATUS_ASSOCIATED))
798 queue_delayed_work(priv->workqueue,
799 &priv->led_link_off,
800 LD_TIME_LINK_ON);
803 spin_unlock_irqrestore(&priv->lock, flags);
806 static void ipw_bg_led_link_on(void *data)
808 struct ipw_priv *priv = data;
809 mutex_lock(&priv->mutex);
810 ipw_led_link_on(data);
811 mutex_unlock(&priv->mutex);
814 static void ipw_led_link_off(struct ipw_priv *priv)
816 unsigned long flags;
817 u32 led;
819 /* If configured not to use LEDs, or nic type is 1,
820 * then we don't toggle the LINK led. */
821 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
822 return;
824 spin_lock_irqsave(&priv->lock, flags);
826 if (priv->status & STATUS_LED_LINK_ON) {
827 led = ipw_read_reg32(priv, IPW_EVENT_REG);
828 led &= priv->led_association_off;
829 led = ipw_register_toggle(led);
831 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
832 ipw_write_reg32(priv, IPW_EVENT_REG, led);
834 IPW_DEBUG_LED("Link LED Off\n");
836 priv->status &= ~STATUS_LED_LINK_ON;
838 /* If we aren't associated and the radio is on, schedule
839 * turning the LED on (blink while unassociated) */
840 if (!(priv->status & STATUS_RF_KILL_MASK) &&
841 !(priv->status & STATUS_ASSOCIATED))
842 queue_delayed_work(priv->workqueue, &priv->led_link_on,
843 LD_TIME_LINK_OFF);
847 spin_unlock_irqrestore(&priv->lock, flags);
850 static void ipw_bg_led_link_off(void *data)
852 struct ipw_priv *priv = data;
853 mutex_lock(&priv->mutex);
854 ipw_led_link_off(data);
855 mutex_unlock(&priv->mutex);
858 static void __ipw_led_activity_on(struct ipw_priv *priv)
860 u32 led;
862 if (priv->config & CFG_NO_LED)
863 return;
865 if (priv->status & STATUS_RF_KILL_MASK)
866 return;
868 if (!(priv->status & STATUS_LED_ACT_ON)) {
869 led = ipw_read_reg32(priv, IPW_EVENT_REG);
870 led |= priv->led_activity_on;
872 led = ipw_register_toggle(led);
874 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
875 ipw_write_reg32(priv, IPW_EVENT_REG, led);
877 IPW_DEBUG_LED("Activity LED On\n");
879 priv->status |= STATUS_LED_ACT_ON;
881 cancel_delayed_work(&priv->led_act_off);
882 queue_delayed_work(priv->workqueue, &priv->led_act_off,
883 LD_TIME_ACT_ON);
884 } else {
885 /* Reschedule LED off for full time period */
886 cancel_delayed_work(&priv->led_act_off);
887 queue_delayed_work(priv->workqueue, &priv->led_act_off,
888 LD_TIME_ACT_ON);
892 #if 0
893 void ipw_led_activity_on(struct ipw_priv *priv)
895 unsigned long flags;
896 spin_lock_irqsave(&priv->lock, flags);
897 __ipw_led_activity_on(priv);
898 spin_unlock_irqrestore(&priv->lock, flags);
900 #endif /* 0 */
902 static void ipw_led_activity_off(struct ipw_priv *priv)
904 unsigned long flags;
905 u32 led;
907 if (priv->config & CFG_NO_LED)
908 return;
910 spin_lock_irqsave(&priv->lock, flags);
912 if (priv->status & STATUS_LED_ACT_ON) {
913 led = ipw_read_reg32(priv, IPW_EVENT_REG);
914 led &= priv->led_activity_off;
916 led = ipw_register_toggle(led);
918 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
919 ipw_write_reg32(priv, IPW_EVENT_REG, led);
921 IPW_DEBUG_LED("Activity LED Off\n");
923 priv->status &= ~STATUS_LED_ACT_ON;
926 spin_unlock_irqrestore(&priv->lock, flags);
929 static void ipw_bg_led_activity_off(void *data)
931 struct ipw_priv *priv = data;
932 mutex_lock(&priv->mutex);
933 ipw_led_activity_off(data);
934 mutex_unlock(&priv->mutex);
937 static void ipw_led_band_on(struct ipw_priv *priv)
939 unsigned long flags;
940 u32 led;
942 /* Only nic type 1 supports mode LEDs */
943 if (priv->config & CFG_NO_LED ||
944 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
945 return;
947 spin_lock_irqsave(&priv->lock, flags);
949 led = ipw_read_reg32(priv, IPW_EVENT_REG);
950 if (priv->assoc_network->mode == IEEE_A) {
951 led |= priv->led_ofdm_on;
952 led &= priv->led_association_off;
953 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
954 } else if (priv->assoc_network->mode == IEEE_G) {
955 led |= priv->led_ofdm_on;
956 led |= priv->led_association_on;
957 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
958 } else {
959 led &= priv->led_ofdm_off;
960 led |= priv->led_association_on;
961 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
964 led = ipw_register_toggle(led);
966 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
967 ipw_write_reg32(priv, IPW_EVENT_REG, led);
969 spin_unlock_irqrestore(&priv->lock, flags);
972 static void ipw_led_band_off(struct ipw_priv *priv)
974 unsigned long flags;
975 u32 led;
977 /* Only nic type 1 supports mode LEDs */
978 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
979 return;
981 spin_lock_irqsave(&priv->lock, flags);
983 led = ipw_read_reg32(priv, IPW_EVENT_REG);
984 led &= priv->led_ofdm_off;
985 led &= priv->led_association_off;
987 led = ipw_register_toggle(led);
989 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
990 ipw_write_reg32(priv, IPW_EVENT_REG, led);
992 spin_unlock_irqrestore(&priv->lock, flags);
995 static void ipw_led_radio_on(struct ipw_priv *priv)
997 ipw_led_link_on(priv);
1000 static void ipw_led_radio_off(struct ipw_priv *priv)
1002 ipw_led_activity_off(priv);
1003 ipw_led_link_off(priv);
1006 static void ipw_led_link_up(struct ipw_priv *priv)
1008 /* Set the Link Led on for all nic types */
1009 ipw_led_link_on(priv);
1012 static void ipw_led_link_down(struct ipw_priv *priv)
1014 ipw_led_activity_off(priv);
1015 ipw_led_link_off(priv);
1017 if (priv->status & STATUS_RF_KILL_MASK)
1018 ipw_led_radio_off(priv);
1021 static void ipw_led_init(struct ipw_priv *priv)
1023 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1025 /* Set the default PINs for the link and activity leds */
1026 priv->led_activity_on = IPW_ACTIVITY_LED;
1027 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1029 priv->led_association_on = IPW_ASSOCIATED_LED;
1030 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1032 /* Set the default PINs for the OFDM leds */
1033 priv->led_ofdm_on = IPW_OFDM_LED;
1034 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1036 switch (priv->nic_type) {
1037 case EEPROM_NIC_TYPE_1:
1038 /* In this NIC type, the LEDs are reversed.... */
1039 priv->led_activity_on = IPW_ASSOCIATED_LED;
1040 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1041 priv->led_association_on = IPW_ACTIVITY_LED;
1042 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1044 if (!(priv->config & CFG_NO_LED))
1045 ipw_led_band_on(priv);
1047 /* And we don't blink link LEDs for this nic, so
1048 * just return here */
1049 return;
1051 case EEPROM_NIC_TYPE_3:
1052 case EEPROM_NIC_TYPE_2:
1053 case EEPROM_NIC_TYPE_4:
1054 case EEPROM_NIC_TYPE_0:
1055 break;
1057 default:
1058 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1059 priv->nic_type);
1060 priv->nic_type = EEPROM_NIC_TYPE_0;
1061 break;
1064 if (!(priv->config & CFG_NO_LED)) {
1065 if (priv->status & STATUS_ASSOCIATED)
1066 ipw_led_link_on(priv);
1067 else
1068 ipw_led_link_off(priv);
1072 static void ipw_led_shutdown(struct ipw_priv *priv)
1074 ipw_led_activity_off(priv);
1075 ipw_led_link_off(priv);
1076 ipw_led_band_off(priv);
1077 cancel_delayed_work(&priv->led_link_on);
1078 cancel_delayed_work(&priv->led_link_off);
1079 cancel_delayed_work(&priv->led_act_off);
1083 * The following adds a new attribute to the sysfs representation
1084 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1085 * used for controlling the debug level.
1087 * See the level definitions in ipw for details.
1089 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1091 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1094 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1095 size_t count)
1097 char *p = (char *)buf;
1098 u32 val;
1100 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1101 p++;
1102 if (p[0] == 'x' || p[0] == 'X')
1103 p++;
1104 val = simple_strtoul(p, &p, 16);
1105 } else
1106 val = simple_strtoul(p, &p, 10);
1107 if (p == buf)
1108 printk(KERN_INFO DRV_NAME
1109 ": %s is not in hex or decimal form.\n", buf);
1110 else
1111 ipw_debug_level = val;
1113 return strnlen(buf, count);
1116 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1117 show_debug_level, store_debug_level);
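/*
 * Usage sketch for the attribute above (illustrative; the exact sysfs path
 * depends on DRV_NAME, following the comment earlier in this file):
 *
 *   # cat /sys/bus/pci/drivers/ipw/debug_level
 *   0x00000000
 *   # echo 0x43fff > /sys/bus/pci/drivers/ipw/debug_level
 *
 * store_debug_level() accepts hex input (leading "x" or "0x") as well as
 * plain decimal.
 */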
1119 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1121 /* length = 1st dword in log */
1122 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1125 static void ipw_capture_event_log(struct ipw_priv *priv,
1126 u32 log_len, struct ipw_event *log)
1128 u32 base;
1130 if (log_len) {
1131 base = ipw_read32(priv, IPW_EVENT_LOG);
1132 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1133 (u8 *) log, sizeof(*log) * log_len);
1137 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1139 struct ipw_fw_error *error;
1140 u32 log_len = ipw_get_event_log_len(priv);
1141 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1142 u32 elem_len = ipw_read_reg32(priv, base);
1144 error = kmalloc(sizeof(*error) +
1145 sizeof(*error->elem) * elem_len +
1146 sizeof(*error->log) * log_len, GFP_ATOMIC);
1147 if (!error) {
1148 IPW_ERROR("Memory allocation for firmware error log "
1149 "failed.\n");
1150 return NULL;
1152 error->jiffies = jiffies;
1153 error->status = priv->status;
1154 error->config = priv->config;
1155 error->elem_len = elem_len;
1156 error->log_len = log_len;
1157 error->elem = (struct ipw_error_elem *)error->payload;
1158 error->log = (struct ipw_event *)(error->elem + elem_len);
1160 ipw_capture_event_log(priv, log_len, error->log);
1162 if (elem_len)
1163 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1164 sizeof(*error->elem) * elem_len);
1166 return error;
1169 static void ipw_free_error_log(struct ipw_fw_error *error)
1171 if (error)
1172 kfree(error);
1175 static ssize_t show_event_log(struct device *d,
1176 struct device_attribute *attr, char *buf)
1178 struct ipw_priv *priv = dev_get_drvdata(d);
1179 u32 log_len = ipw_get_event_log_len(priv);
1180 struct ipw_event log[log_len];
1181 u32 len = 0, i;
1183 ipw_capture_event_log(priv, log_len, log);
1185 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1186 for (i = 0; i < log_len; i++)
1187 len += snprintf(buf + len, PAGE_SIZE - len,
1188 "\n%08X%08X%08X",
1189 log[i].time, log[i].event, log[i].data);
1190 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1191 return len;
1194 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1196 static ssize_t show_error(struct device *d,
1197 struct device_attribute *attr, char *buf)
1199 struct ipw_priv *priv = dev_get_drvdata(d);
1200 u32 len = 0, i;
1201 if (!priv->error)
1202 return 0;
1203 len += snprintf(buf + len, PAGE_SIZE - len,
1204 "%08lX%08X%08X%08X",
1205 priv->error->jiffies,
1206 priv->error->status,
1207 priv->error->config, priv->error->elem_len);
1208 for (i = 0; i < priv->error->elem_len; i++)
1209 len += snprintf(buf + len, PAGE_SIZE - len,
1210 "\n%08X%08X%08X%08X%08X%08X%08X",
1211 priv->error->elem[i].time,
1212 priv->error->elem[i].desc,
1213 priv->error->elem[i].blink1,
1214 priv->error->elem[i].blink2,
1215 priv->error->elem[i].link1,
1216 priv->error->elem[i].link2,
1217 priv->error->elem[i].data);
1219 len += snprintf(buf + len, PAGE_SIZE - len,
1220 "\n%08X", priv->error->log_len);
1221 for (i = 0; i < priv->error->log_len; i++)
1222 len += snprintf(buf + len, PAGE_SIZE - len,
1223 "\n%08X%08X%08X",
1224 priv->error->log[i].time,
1225 priv->error->log[i].event,
1226 priv->error->log[i].data);
1227 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1228 return len;
1231 static ssize_t clear_error(struct device *d,
1232 struct device_attribute *attr,
1233 const char *buf, size_t count)
1235 struct ipw_priv *priv = dev_get_drvdata(d);
1236 if (priv->error) {
1237 ipw_free_error_log(priv->error);
1238 priv->error = NULL;
1240 return count;
1243 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1245 static ssize_t show_cmd_log(struct device *d,
1246 struct device_attribute *attr, char *buf)
1248 struct ipw_priv *priv = dev_get_drvdata(d);
1249 u32 len = 0, i;
1250 if (!priv->cmdlog)
1251 return 0;
1252 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1253 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1254 i = (i + 1) % priv->cmdlog_len) {
1255 len +=
1256 snprintf(buf + len, PAGE_SIZE - len,
1257 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1258 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1259 priv->cmdlog[i].cmd.len);
1260 len +=
1261 snprintk_buf(buf + len, PAGE_SIZE - len,
1262 (u8 *) priv->cmdlog[i].cmd.param,
1263 priv->cmdlog[i].cmd.len);
1264 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1266 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1267 return len;
1270 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1272 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1273 char *buf)
1275 struct ipw_priv *priv = dev_get_drvdata(d);
1276 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1279 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1280 const char *buf, size_t count)
1282 struct ipw_priv *priv = dev_get_drvdata(d);
1283 #ifdef CONFIG_IPW2200_DEBUG
1284 struct net_device *dev = priv->net_dev;
1285 #endif
1286 char buffer[] = "00000000";
1287 unsigned long len =
1288 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1289 unsigned long val;
1290 char *p = buffer;
1292 IPW_DEBUG_INFO("enter\n");
1294 strncpy(buffer, buf, len);
1295 buffer[len] = 0;
1297 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1298 p++;
1299 if (p[0] == 'x' || p[0] == 'X')
1300 p++;
1301 val = simple_strtoul(p, &p, 16);
1302 } else
1303 val = simple_strtoul(p, &p, 10);
1304 if (p == buffer) {
1305 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1306 } else {
1307 priv->ieee->scan_age = val;
1308 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1311 IPW_DEBUG_INFO("exit\n");
1312 return len;
1315 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1317 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1318 char *buf)
1320 struct ipw_priv *priv = dev_get_drvdata(d);
1321 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1324 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1325 const char *buf, size_t count)
1327 struct ipw_priv *priv = dev_get_drvdata(d);
1329 IPW_DEBUG_INFO("enter\n");
1331 if (count == 0)
1332 return 0;
1334 if (*buf == 0) {
1335 IPW_DEBUG_LED("Disabling LED control.\n");
1336 priv->config |= CFG_NO_LED;
1337 ipw_led_shutdown(priv);
1338 } else {
1339 IPW_DEBUG_LED("Enabling LED control.\n");
1340 priv->config &= ~CFG_NO_LED;
1341 ipw_led_init(priv);
1344 IPW_DEBUG_INFO("exit\n");
1345 return count;
1348 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1350 static ssize_t show_status(struct device *d,
1351 struct device_attribute *attr, char *buf)
1353 struct ipw_priv *p = d->driver_data;
1354 return sprintf(buf, "0x%08x\n", (int)p->status);
1357 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1359 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1360 char *buf)
1362 struct ipw_priv *p = d->driver_data;
1363 return sprintf(buf, "0x%08x\n", (int)p->config);
1366 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1368 static ssize_t show_nic_type(struct device *d,
1369 struct device_attribute *attr, char *buf)
1371 struct ipw_priv *priv = d->driver_data;
1372 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1375 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1377 static ssize_t show_ucode_version(struct device *d,
1378 struct device_attribute *attr, char *buf)
1380 u32 len = sizeof(u32), tmp = 0;
1381 struct ipw_priv *p = d->driver_data;
1383 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1384 return 0;
1386 return sprintf(buf, "0x%08x\n", tmp);
1389 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1391 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1392 char *buf)
1394 u32 len = sizeof(u32), tmp = 0;
1395 struct ipw_priv *p = d->driver_data;
1397 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1398 return 0;
1400 return sprintf(buf, "0x%08x\n", tmp);
1403 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1406 * Add a device attribute to view/control the delay between eeprom
1407 * operations.
1409 static ssize_t show_eeprom_delay(struct device *d,
1410 struct device_attribute *attr, char *buf)
1412 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1413 return sprintf(buf, "%i\n", n);
1415 static ssize_t store_eeprom_delay(struct device *d,
1416 struct device_attribute *attr,
1417 const char *buf, size_t count)
1419 struct ipw_priv *p = d->driver_data;
1420 sscanf(buf, "%i", &p->eeprom_delay);
1421 return strnlen(buf, count);
1424 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1425 show_eeprom_delay, store_eeprom_delay);
1427 static ssize_t show_command_event_reg(struct device *d,
1428 struct device_attribute *attr, char *buf)
1430 u32 reg = 0;
1431 struct ipw_priv *p = d->driver_data;
1433 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1434 return sprintf(buf, "0x%08x\n", reg);
1436 static ssize_t store_command_event_reg(struct device *d,
1437 struct device_attribute *attr,
1438 const char *buf, size_t count)
1440 u32 reg;
1441 struct ipw_priv *p = d->driver_data;
1443 sscanf(buf, "%x", &reg);
1444 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1445 return strnlen(buf, count);
1448 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1449 show_command_event_reg, store_command_event_reg);
1451 static ssize_t show_mem_gpio_reg(struct device *d,
1452 struct device_attribute *attr, char *buf)
1454 u32 reg = 0;
1455 struct ipw_priv *p = d->driver_data;
1457 reg = ipw_read_reg32(p, 0x301100);
1458 return sprintf(buf, "0x%08x\n", reg);
1460 static ssize_t store_mem_gpio_reg(struct device *d,
1461 struct device_attribute *attr,
1462 const char *buf, size_t count)
1464 u32 reg;
1465 struct ipw_priv *p = d->driver_data;
1467 sscanf(buf, "%x", &reg);
1468 ipw_write_reg32(p, 0x301100, reg);
1469 return strnlen(buf, count);
1472 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1473 show_mem_gpio_reg, store_mem_gpio_reg);
1475 static ssize_t show_indirect_dword(struct device *d,
1476 struct device_attribute *attr, char *buf)
1478 u32 reg = 0;
1479 struct ipw_priv *priv = d->driver_data;
1481 if (priv->status & STATUS_INDIRECT_DWORD)
1482 reg = ipw_read_reg32(priv, priv->indirect_dword);
1483 else
1484 reg = 0;
1486 return sprintf(buf, "0x%08x\n", reg);
1488 static ssize_t store_indirect_dword(struct device *d,
1489 struct device_attribute *attr,
1490 const char *buf, size_t count)
1492 struct ipw_priv *priv = d->driver_data;
1494 sscanf(buf, "%x", &priv->indirect_dword);
1495 priv->status |= STATUS_INDIRECT_DWORD;
1496 return strnlen(buf, count);
1499 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1500 show_indirect_dword, store_indirect_dword);
1502 static ssize_t show_indirect_byte(struct device *d,
1503 struct device_attribute *attr, char *buf)
1505 u8 reg = 0;
1506 struct ipw_priv *priv = d->driver_data;
1508 if (priv->status & STATUS_INDIRECT_BYTE)
1509 reg = ipw_read_reg8(priv, priv->indirect_byte);
1510 else
1511 reg = 0;
1513 return sprintf(buf, "0x%02x\n", reg);
1515 static ssize_t store_indirect_byte(struct device *d,
1516 struct device_attribute *attr,
1517 const char *buf, size_t count)
1519 struct ipw_priv *priv = d->driver_data;
1521 sscanf(buf, "%x", &priv->indirect_byte);
1522 priv->status |= STATUS_INDIRECT_BYTE;
1523 return strnlen(buf, count);
1526 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1527 show_indirect_byte, store_indirect_byte);
1529 static ssize_t show_direct_dword(struct device *d,
1530 struct device_attribute *attr, char *buf)
1532 u32 reg = 0;
1533 struct ipw_priv *priv = d->driver_data;
1535 if (priv->status & STATUS_DIRECT_DWORD)
1536 reg = ipw_read32(priv, priv->direct_dword);
1537 else
1538 reg = 0;
1540 return sprintf(buf, "0x%08x\n", reg);
1542 static ssize_t store_direct_dword(struct device *d,
1543 struct device_attribute *attr,
1544 const char *buf, size_t count)
1546 struct ipw_priv *priv = d->driver_data;
1548 sscanf(buf, "%x", &priv->direct_dword);
1549 priv->status |= STATUS_DIRECT_DWORD;
1550 return strnlen(buf, count);
1553 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1554 show_direct_dword, store_direct_dword);
1556 static int rf_kill_active(struct ipw_priv *priv)
1558 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1559 priv->status |= STATUS_RF_KILL_HW;
1560 else
1561 priv->status &= ~STATUS_RF_KILL_HW;
1563 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1566 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1567 char *buf)
1569 /* 0 - RF kill not enabled
1570 1 - SW based RF kill active (sysfs)
1571 2 - HW based RF kill active
1572 3 - Both HW and SW based RF kill active */
1573 struct ipw_priv *priv = d->driver_data;
1574 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1575 (rf_kill_active(priv) ? 0x2 : 0x0);
1576 return sprintf(buf, "%i\n", val);
1579 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1581 if ((disable_radio ? 1 : 0) ==
1582 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1583 return 0;
1585 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1586 disable_radio ? "OFF" : "ON");
1588 if (disable_radio) {
1589 priv->status |= STATUS_RF_KILL_SW;
1591 if (priv->workqueue)
1592 cancel_delayed_work(&priv->request_scan);
1593 queue_work(priv->workqueue, &priv->down);
1594 } else {
1595 priv->status &= ~STATUS_RF_KILL_SW;
1596 if (rf_kill_active(priv)) {
1597 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1598 "disabled by HW switch\n");
1599 /* Make sure the RF_KILL check timer is running */
1600 cancel_delayed_work(&priv->rf_kill);
1601 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1602 2 * HZ);
1603 } else
1604 queue_work(priv->workqueue, &priv->up);
1607 return 1;
1610 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1611 const char *buf, size_t count)
1613 struct ipw_priv *priv = d->driver_data;
1615 ipw_radio_kill_sw(priv, buf[0] == '1');
1617 return count;
1620 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
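/*
 * Usage sketch (illustrative): writing '1' to the rf_kill attribute asserts
 * the software RF kill (the radio is brought down via the ->down work),
 * writing anything else clears it again; reads report the 0-3 encoding
 * described in show_rf_kill() above.
 */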
1622 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1623 char *buf)
1625 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1626 int pos = 0, len = 0;
1627 if (priv->config & CFG_SPEED_SCAN) {
1628 while (priv->speed_scan[pos] != 0)
1629 len += sprintf(&buf[len], "%d ",
1630 priv->speed_scan[pos++]);
1631 return len + sprintf(&buf[len], "\n");
1634 return sprintf(buf, "0\n");
1637 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1638 const char *buf, size_t count)
1640 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1641 int channel, pos = 0;
1642 const char *p = buf;
1644 /* list of space separated channels to scan, optionally ending with 0 */
1645 while ((channel = simple_strtol(p, NULL, 0))) {
1646 if (pos == MAX_SPEED_SCAN - 1) {
1647 priv->speed_scan[pos] = 0;
1648 break;
1651 if (ieee80211_is_valid_channel(priv->ieee, channel))
1652 priv->speed_scan[pos++] = channel;
1653 else
1654 IPW_WARNING("Skipping invalid channel request: %d\n",
1655 channel);
1656 p = strchr(p, ' ');
1657 if (!p)
1658 break;
1659 while (*p == ' ' || *p == '\t')
1660 p++;
1663 if (pos == 0)
1664 priv->config &= ~CFG_SPEED_SCAN;
1665 else {
1666 priv->speed_scan_pos = 0;
1667 priv->config |= CFG_SPEED_SCAN;
1670 return count;
1673 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1674 store_speed_scan);
1676 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1677 char *buf)
1679 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1680 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1683 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1684 const char *buf, size_t count)
1686 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1687 if (buf[0] == '1')
1688 priv->config |= CFG_NET_STATS;
1689 else
1690 priv->config &= ~CFG_NET_STATS;
1692 return count;
1695 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1696 show_net_stats, store_net_stats);
1698 static void notify_wx_assoc_event(struct ipw_priv *priv)
1700 union iwreq_data wrqu;
1701 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1702 if (priv->status & STATUS_ASSOCIATED)
1703 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1704 else
1705 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1706 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1709 static void ipw_irq_tasklet(struct ipw_priv *priv)
1711 u32 inta, inta_mask, handled = 0;
1712 unsigned long flags;
1713 int rc = 0;
1715 spin_lock_irqsave(&priv->lock, flags);
1717 inta = ipw_read32(priv, IPW_INTA_RW);
1718 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1719 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1721 /* Add any cached INTA values that need to be handled */
1722 inta |= priv->isr_inta;
1724 /* handle all the possible causes of the interrupt */
1725 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1726 ipw_rx(priv);
1727 handled |= IPW_INTA_BIT_RX_TRANSFER;
1730 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1731 IPW_DEBUG_HC("Command completed.\n");
1732 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1733 priv->status &= ~STATUS_HCMD_ACTIVE;
1734 wake_up_interruptible(&priv->wait_command_queue);
1735 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1738 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1739 IPW_DEBUG_TX("TX_QUEUE_1\n");
1740 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1741 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1744 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1745 IPW_DEBUG_TX("TX_QUEUE_2\n");
1746 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1747 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1750 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1751 IPW_DEBUG_TX("TX_QUEUE_3\n");
1752 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1753 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1756 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1757 IPW_DEBUG_TX("TX_QUEUE_4\n");
1758 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1759 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1762 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1763 IPW_WARNING("STATUS_CHANGE\n");
1764 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1767 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1768 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1769 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1772 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1773 IPW_WARNING("HOST_CMD_DONE\n");
1774 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1777 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1778 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1779 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1782 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1783 IPW_WARNING("PHY_OFF_DONE\n");
1784 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1787 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1788 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1789 priv->status |= STATUS_RF_KILL_HW;
1790 wake_up_interruptible(&priv->wait_command_queue);
1791 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1792 cancel_delayed_work(&priv->request_scan);
1793 schedule_work(&priv->link_down);
1794 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1795 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1798 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1799 IPW_WARNING("Firmware error detected. Restarting.\n");
1800 if (priv->error) {
1801 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1802 #ifdef CONFIG_IPW2200_DEBUG
1803 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1804 struct ipw_fw_error *error =
1805 ipw_alloc_error_log(priv);
1806 ipw_dump_error_log(priv, error);
1807 if (error)
1808 ipw_free_error_log(error);
1810 #endif
1811 } else {
1812 priv->error = ipw_alloc_error_log(priv);
1813 if (priv->error)
1814 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1815 else
1816 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1817 "log.\n");
1818 #ifdef CONFIG_IPW2200_DEBUG
1819 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1820 ipw_dump_error_log(priv, priv->error);
1821 #endif
1824 /* XXX: If hardware encryption is for WPA/WPA2,
1825 * we have to notify the supplicant. */
1826 if (priv->ieee->sec.encrypt) {
1827 priv->status &= ~STATUS_ASSOCIATED;
1828 notify_wx_assoc_event(priv);
1831 /* Keep the restart process from trying to send host
1832 * commands by clearing the INIT status bit */
1833 priv->status &= ~STATUS_INIT;
1835 /* Cancel currently queued command. */
1836 priv->status &= ~STATUS_HCMD_ACTIVE;
1837 wake_up_interruptible(&priv->wait_command_queue);
1839 queue_work(priv->workqueue, &priv->adapter_restart);
1840 handled |= IPW_INTA_BIT_FATAL_ERROR;
1843 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1844 IPW_ERROR("Parity error\n");
1845 handled |= IPW_INTA_BIT_PARITY_ERROR;
1848 if (handled != inta) {
1849 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1852 /* enable all interrupts */
1853 ipw_enable_interrupts(priv);
1855 spin_unlock_irqrestore(&priv->lock, flags);
1858 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1859 static char *get_cmd_string(u8 cmd)
1861 switch (cmd) {
1862 IPW_CMD(HOST_COMPLETE);
1863 IPW_CMD(POWER_DOWN);
1864 IPW_CMD(SYSTEM_CONFIG);
1865 IPW_CMD(MULTICAST_ADDRESS);
1866 IPW_CMD(SSID);
1867 IPW_CMD(ADAPTER_ADDRESS);
1868 IPW_CMD(PORT_TYPE);
1869 IPW_CMD(RTS_THRESHOLD);
1870 IPW_CMD(FRAG_THRESHOLD);
1871 IPW_CMD(POWER_MODE);
1872 IPW_CMD(WEP_KEY);
1873 IPW_CMD(TGI_TX_KEY);
1874 IPW_CMD(SCAN_REQUEST);
1875 IPW_CMD(SCAN_REQUEST_EXT);
1876 IPW_CMD(ASSOCIATE);
1877 IPW_CMD(SUPPORTED_RATES);
1878 IPW_CMD(SCAN_ABORT);
1879 IPW_CMD(TX_FLUSH);
1880 IPW_CMD(QOS_PARAMETERS);
1881 IPW_CMD(DINO_CONFIG);
1882 IPW_CMD(RSN_CAPABILITIES);
1883 IPW_CMD(RX_KEY);
1884 IPW_CMD(CARD_DISABLE);
1885 IPW_CMD(SEED_NUMBER);
1886 IPW_CMD(TX_POWER);
1887 IPW_CMD(COUNTRY_INFO);
1888 IPW_CMD(AIRONET_INFO);
1889 IPW_CMD(AP_TX_POWER);
1890 IPW_CMD(CCKM_INFO);
1891 IPW_CMD(CCX_VER_INFO);
1892 IPW_CMD(SET_CALIBRATION);
1893 IPW_CMD(SENSITIVITY_CALIB);
1894 IPW_CMD(RETRY_LIMIT);
1895 IPW_CMD(IPW_PRE_POWER_DOWN);
1896 IPW_CMD(VAP_BEACON_TEMPLATE);
1897 IPW_CMD(VAP_DTIM_PERIOD);
1898 IPW_CMD(EXT_SUPPORTED_RATES);
1899 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1900 IPW_CMD(VAP_QUIET_INTERVALS);
1901 IPW_CMD(VAP_CHANNEL_SWITCH);
1902 IPW_CMD(VAP_MANDATORY_CHANNELS);
1903 IPW_CMD(VAP_CELL_PWR_LIMIT);
1904 IPW_CMD(VAP_CF_PARAM_SET);
1905 IPW_CMD(VAP_SET_BEACONING_STATE);
1906 IPW_CMD(MEASUREMENT);
1907 IPW_CMD(POWER_CAPABILITY);
1908 IPW_CMD(SUPPORTED_CHANNELS);
1909 IPW_CMD(TPC_REPORT);
1910 IPW_CMD(WME_INFO);
1911 IPW_CMD(PRODUCTION_COMMAND);
1912 default:
1913 return "UNKNOWN";
1917 #define HOST_COMPLETE_TIMEOUT HZ
1919 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1921 int rc = 0;
1922 unsigned long flags;
1924 spin_lock_irqsave(&priv->lock, flags);
1925 if (priv->status & STATUS_HCMD_ACTIVE) {
1926 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1927 get_cmd_string(cmd->cmd));
1928 spin_unlock_irqrestore(&priv->lock, flags);
1929 return -EAGAIN;
1932 priv->status |= STATUS_HCMD_ACTIVE;
1934 if (priv->cmdlog) {
1935 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1936 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1937 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1938 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1939 cmd->len);
1940 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1943 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1944 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1945 priv->status);
1947 #ifndef DEBUG_CMD_WEP_KEY
1948 if (cmd->cmd == IPW_CMD_WEP_KEY)
1949 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
1950 else
1951 #endif
1952 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1954 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
1955 if (rc) {
1956 priv->status &= ~STATUS_HCMD_ACTIVE;
1957 IPW_ERROR("Failed to send %s: Reason %d\n",
1958 get_cmd_string(cmd->cmd), rc);
1959 spin_unlock_irqrestore(&priv->lock, flags);
1960 goto exit;
1962 spin_unlock_irqrestore(&priv->lock, flags);
1964 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1965 !(priv->
1966 status & STATUS_HCMD_ACTIVE),
1967 HOST_COMPLETE_TIMEOUT);
1968 if (rc == 0) {
1969 spin_lock_irqsave(&priv->lock, flags);
1970 if (priv->status & STATUS_HCMD_ACTIVE) {
1971 IPW_ERROR("Failed to send %s: Command timed out.\n",
1972 get_cmd_string(cmd->cmd));
1973 priv->status &= ~STATUS_HCMD_ACTIVE;
1974 spin_unlock_irqrestore(&priv->lock, flags);
1975 rc = -EIO;
1976 goto exit;
1978 spin_unlock_irqrestore(&priv->lock, flags);
1979 } else
1980 rc = 0;
1982 if (priv->status & STATUS_RF_KILL_HW) {
1983 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1984 get_cmd_string(cmd->cmd));
1985 rc = -EIO;
1986 goto exit;
1989 exit:
1990 if (priv->cmdlog) {
1991 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
1992 priv->cmdlog_pos %= priv->cmdlog_len;
1994 return rc;
1997 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
1999 struct host_cmd cmd = {
2000 .cmd = command,
2003 return __ipw_send_cmd(priv, &cmd);
2006 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2007 void *data)
2009 struct host_cmd cmd = {
2010 .cmd = command,
2011 .len = len,
2012 .param = data,
2015 return __ipw_send_cmd(priv, &cmd);
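/*
 * Illustration of how the wrappers above are used: a caller packs a
 * fixed-size parameter structure and hands it to ipw_send_cmd_pdu(),
 * which builds the struct host_cmd for __ipw_send_cmd().  A minimal
 * sketch, mirroring ipw_set_sensitivity() further below:
 *
 *	struct ipw_sensitivity_calib calib = { .beacon_rssi_raw = sens };
 *	ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB,
 *			 sizeof(calib), &calib);
 *
 * __ipw_send_cmd() serializes on priv->lock, rejects overlapping
 * commands with -EAGAIN, queues the command via ipw_queue_tx_hcmd()
 * and then sleeps for up to HOST_COMPLETE_TIMEOUT waiting for the
 * firmware to clear STATUS_HCMD_ACTIVE.
 */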
2018 static int ipw_send_host_complete(struct ipw_priv *priv)
2020 if (!priv) {
2021 IPW_ERROR("Invalid args\n");
2022 return -1;
2025 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2028 static int ipw_send_system_config(struct ipw_priv *priv,
2029 struct ipw_sys_config *config)
2031 if (!priv || !config) {
2032 IPW_ERROR("Invalid args\n");
2033 return -1;
2036 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
2037 config);
2040 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2042 if (!priv || !ssid) {
2043 IPW_ERROR("Invalid args\n");
2044 return -1;
2047 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2048 ssid);
2051 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2053 if (!priv || !mac) {
2054 IPW_ERROR("Invalid args\n");
2055 return -1;
2058 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2059 priv->net_dev->name, MAC_ARG(mac));
2061 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2065 * NOTE: This must be executed from our workqueue as it results in udelay
2066 * being called which may corrupt the keyboard if executed on the default
2067 * workqueue
2069 static void ipw_adapter_restart(void *adapter)
2071 struct ipw_priv *priv = adapter;
2073 if (priv->status & STATUS_RF_KILL_MASK)
2074 return;
2076 ipw_down(priv);
2078 if (priv->assoc_network &&
2079 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2080 ipw_remove_current_network(priv);
2082 if (ipw_up(priv)) {
2083 IPW_ERROR("Failed to up device\n");
2084 return;
2088 static void ipw_bg_adapter_restart(void *data)
2090 struct ipw_priv *priv = data;
2091 mutex_lock(&priv->mutex);
2092 ipw_adapter_restart(data);
2093 mutex_unlock(&priv->mutex);
2096 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2098 static void ipw_scan_check(void *data)
2100 struct ipw_priv *priv = data;
2101 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2102 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2103 "adapter after (%dms).\n",
2104 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2105 queue_work(priv->workqueue, &priv->adapter_restart);
2109 static void ipw_bg_scan_check(void *data)
2111 struct ipw_priv *priv = data;
2112 mutex_lock(&priv->mutex);
2113 ipw_scan_check(data);
2114 mutex_unlock(&priv->mutex);
2117 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2118 struct ipw_scan_request_ext *request)
2120 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2121 sizeof(*request), request);
2124 static int ipw_send_scan_abort(struct ipw_priv *priv)
2126 if (!priv) {
2127 IPW_ERROR("Invalid args\n");
2128 return -1;
2131 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2134 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2136 struct ipw_sensitivity_calib calib = {
2137 .beacon_rssi_raw = sens,
2140 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2141 &calib);
2144 static int ipw_send_associate(struct ipw_priv *priv,
2145 struct ipw_associate *associate)
2147 struct ipw_associate tmp_associate;
2149 if (!priv || !associate) {
2150 IPW_ERROR("Invalid args\n");
2151 return -1;
2154 memcpy(&tmp_associate, associate, sizeof(*associate));
2155 tmp_associate.policy_support =
2156 cpu_to_le16(tmp_associate.policy_support);
2157 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2158 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2159 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2160 tmp_associate.listen_interval =
2161 cpu_to_le16(tmp_associate.listen_interval);
2162 tmp_associate.beacon_interval =
2163 cpu_to_le16(tmp_associate.beacon_interval);
2164 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2166 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2167 &tmp_associate);
2170 static int ipw_send_supported_rates(struct ipw_priv *priv,
2171 struct ipw_supported_rates *rates)
2173 if (!priv || !rates) {
2174 IPW_ERROR("Invalid args\n");
2175 return -1;
2178 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2179 rates);
2182 static int ipw_set_random_seed(struct ipw_priv *priv)
2184 u32 val;
2186 if (!priv) {
2187 IPW_ERROR("Invalid args\n");
2188 return -1;
2191 get_random_bytes(&val, sizeof(val));
2193 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2196 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2198 if (!priv) {
2199 IPW_ERROR("Invalid args\n");
2200 return -1;
2203 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2204 &phy_off);
2207 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2209 if (!priv || !power) {
2210 IPW_ERROR("Invalid args\n");
2211 return -1;
2214 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2217 static int ipw_set_tx_power(struct ipw_priv *priv)
2219 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2220 struct ipw_tx_power tx_power;
2221 s8 max_power;
2222 int i;
2224 memset(&tx_power, 0, sizeof(tx_power));
2226 /* configure device for 'G' band */
2227 tx_power.ieee_mode = IPW_G_MODE;
2228 tx_power.num_channels = geo->bg_channels;
2229 for (i = 0; i < geo->bg_channels; i++) {
2230 max_power = geo->bg[i].max_power;
2231 tx_power.channels_tx_power[i].channel_number =
2232 geo->bg[i].channel;
2233 tx_power.channels_tx_power[i].tx_power = max_power ?
2234 min(max_power, priv->tx_power) : priv->tx_power;
2236 if (ipw_send_tx_power(priv, &tx_power))
2237 return -EIO;
2239 /* configure device to also handle 'B' band */
2240 tx_power.ieee_mode = IPW_B_MODE;
2241 if (ipw_send_tx_power(priv, &tx_power))
2242 return -EIO;
2244 /* configure device to also handle 'A' band */
2245 if (priv->ieee->abg_true) {
2246 tx_power.ieee_mode = IPW_A_MODE;
2247 tx_power.num_channels = geo->a_channels;
2248 for (i = 0; i < tx_power.num_channels; i++) {
2249 max_power = geo->a[i].max_power;
2250 tx_power.channels_tx_power[i].channel_number =
2251 geo->a[i].channel;
2252 tx_power.channels_tx_power[i].tx_power = max_power ?
2253 min(max_power, priv->tx_power) : priv->tx_power;
2255 if (ipw_send_tx_power(priv, &tx_power))
2256 return -EIO;
2258 return 0;
2261 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2263 struct ipw_rts_threshold rts_threshold = {
2264 .rts_threshold = rts,
2267 if (!priv) {
2268 IPW_ERROR("Invalid args\n");
2269 return -1;
2272 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2273 sizeof(rts_threshold), &rts_threshold);
2276 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2278 struct ipw_frag_threshold frag_threshold = {
2279 .frag_threshold = frag,
2282 if (!priv) {
2283 IPW_ERROR("Invalid args\n");
2284 return -1;
2287 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2288 sizeof(frag_threshold), &frag_threshold);
2291 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2293 u32 param;
2295 if (!priv) {
2296 IPW_ERROR("Invalid args\n");
2297 return -1;
2300 /* If on battery, set to power index 3; if on AC, set to CAM;
2301 * otherwise pass the user-specified level through. */
2302 switch (mode) {
2303 case IPW_POWER_BATTERY:
2304 param = IPW_POWER_INDEX_3;
2305 break;
2306 case IPW_POWER_AC:
2307 param = IPW_POWER_MODE_CAM;
2308 break;
2309 default:
2310 param = mode;
2311 break;
2314 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2315 &param);
2318 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2320 struct ipw_retry_limit retry_limit = {
2321 .short_retry_limit = slimit,
2322 .long_retry_limit = llimit
2325 if (!priv) {
2326 IPW_ERROR("Invalid args\n");
2327 return -1;
2330 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2331 &retry_limit);
2335 * The IPW device contains a Microwire compatible EEPROM that stores
2336 * various data like the MAC address. Usually the firmware has exclusive
2337 * access to the eeprom, but during device initialization (before the
2338 * device driver has sent the HostComplete command to the firmware) the
2339 * device driver has read access to the EEPROM by way of indirect addressing
2340 * through a couple of memory mapped registers.
2342 * The following is a simplified implementation for pulling data out of
2343 * the eeprom, along with some helper functions to find information in
2344 * the per-device private data's copy of the eeprom.
2346 * NOTE: To better understand how these functions work (i.e. what is a chip
2347 * select and why do we have to keep driving the eeprom clock?), read
2348 * just about any data sheet for a Microwire compatible EEPROM.
2351 /* write a 32 bit value into the indirect accessor register */
2352 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2354 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2356 /* the eeprom requires some time to complete the operation */
2357 udelay(p->eeprom_delay);
2359 return;
2362 /* perform a chip select operation */
2363 static void eeprom_cs(struct ipw_priv *priv)
2365 eeprom_write_reg(priv, 0);
2366 eeprom_write_reg(priv, EEPROM_BIT_CS);
2367 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2368 eeprom_write_reg(priv, EEPROM_BIT_CS);
2371 /* perform a chip de-select operation */
2372 static void eeprom_disable_cs(struct ipw_priv *priv)
2374 eeprom_write_reg(priv, EEPROM_BIT_CS);
2375 eeprom_write_reg(priv, 0);
2376 eeprom_write_reg(priv, EEPROM_BIT_SK);
2379 /* push a single bit down to the eeprom */
2380 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2382 int d = (bit ? EEPROM_BIT_DI : 0);
2383 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2384 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2387 /* push an opcode followed by an address down to the eeprom */
2388 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2390 int i;
2392 eeprom_cs(priv);
2393 eeprom_write_bit(priv, 1);
2394 eeprom_write_bit(priv, op & 2);
2395 eeprom_write_bit(priv, op & 1);
2396 for (i = 7; i >= 0; i--) {
2397 eeprom_write_bit(priv, addr & (1 << i));
2401 /* pull 16 bits off the eeprom, one bit at a time */
2402 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2404 int i;
2405 u16 r = 0;
2407 /* Send READ Opcode */
2408 eeprom_op(priv, EEPROM_CMD_READ, addr);
2410 /* Send dummy bit */
2411 eeprom_write_reg(priv, EEPROM_BIT_CS);
2413 /* Read the byte off the eeprom one bit at a time */
2414 for (i = 0; i < 16; i++) {
2415 u32 data = 0;
2416 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2417 eeprom_write_reg(priv, EEPROM_BIT_CS);
2418 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2419 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2422 /* Send another dummy bit */
2423 eeprom_write_reg(priv, 0);
2424 eeprom_disable_cs(priv);
2426 return r;
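/*
 * Sketch of the Microwire READ transaction implemented above, assuming
 * a standard 3-wire serial EEPROM (CS, SK clock, DI/DO data lines):
 *
 *	eeprom_cs()            assert CS and clock it once
 *	eeprom_op(READ, addr)  shift out the start bit '1', the 2-bit
 *	                       opcode, then the 8-bit address, MSB first
 *	dummy bit              one extra clock with CS held
 *	16 data clocks         eeprom_read_u16() raises and drops SK,
 *	                       samples EEPROM_BIT_DO after each drop and
 *	                       assembles the 16-bit word MSB first
 *	eeprom_disable_cs()    de-assert CS to end the cycle
 */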
2429 /* helper function for pulling the mac address out of the private */
2430 /* data's copy of the eeprom data */
2431 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2433 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2437 * Either the device driver (i.e. the host) or the firmware can
2438 * load eeprom data into the designated region in SRAM. If neither
2439 * happens then the FW will shutdown with a fatal error.
2441 * In order to signal the FW that it should load the EEPROM itself, the
2442 * EEPROM_LOAD_DISABLE region of shared SRAM needs to be non-zero.
2444 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2446 int i;
2447 u16 *eeprom = (u16 *) priv->eeprom;
2449 IPW_DEBUG_TRACE(">>\n");
2451 /* read entire contents of eeprom into private buffer */
2452 for (i = 0; i < 128; i++)
2453 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2456 If the data looks correct, then write our private copy into
2457 the device's SRAM.  Otherwise let the firmware know to perform
2458 the load on its own.
2460 if (priv->eeprom[EEPROM_VERSION] != 0) {
2461 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2463 /* write the eeprom data to sram */
2464 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2465 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2467 /* Do not load eeprom data on fatal error or suspend */
2468 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2469 } else {
2470 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2472 /* Load eeprom data on fatal error or suspend */
2473 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2476 IPW_DEBUG_TRACE("<<\n");
2479 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2481 count >>= 2;
2482 if (!count)
2483 return;
2484 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2485 while (count--)
2486 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2489 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2491 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2492 CB_NUMBER_OF_ELEMENTS_SMALL *
2493 sizeof(struct command_block));
2496 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2497 { /* start dma engine but no transfers yet */
2499 IPW_DEBUG_FW(">> : \n");
2501 /* Start the dma */
2502 ipw_fw_dma_reset_command_blocks(priv);
2504 /* Write CB base address */
2505 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2507 IPW_DEBUG_FW("<< : \n");
2508 return 0;
2511 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2513 u32 control = 0;
2515 IPW_DEBUG_FW(">> :\n");
2517 //set the Stop and Abort bit
2518 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2519 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2520 priv->sram_desc.last_cb_index = 0;
2522 IPW_DEBUG_FW("<< \n");
2525 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2526 struct command_block *cb)
2528 u32 address =
2529 IPW_SHARED_SRAM_DMA_CONTROL +
2530 (sizeof(struct command_block) * index);
2531 IPW_DEBUG_FW(">> :\n");
2533 ipw_write_indirect(priv, address, (u8 *) cb,
2534 (int)sizeof(struct command_block));
2536 IPW_DEBUG_FW("<< :\n");
2537 return 0;
2541 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2543 u32 control = 0;
2544 u32 index = 0;
2546 IPW_DEBUG_FW(">> :\n");
2548 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2549 ipw_fw_dma_write_command_block(priv, index,
2550 &priv->sram_desc.cb_list[index]);
2552 /* Enable the DMA in the CSR register */
2553 ipw_clear_bit(priv, IPW_RESET_REG,
2554 IPW_RESET_REG_MASTER_DISABLED |
2555 IPW_RESET_REG_STOP_MASTER);
2557 /* Set the Start bit. */
2558 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2559 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2561 IPW_DEBUG_FW("<< :\n");
2562 return 0;
2565 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2567 u32 address;
2568 u32 register_value = 0;
2569 u32 cb_fields_address = 0;
2571 IPW_DEBUG_FW(">> :\n");
2572 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2573 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2575 /* Read the DMA Control register */
2576 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2577 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2579 /* Print the CB values */
2580 cb_fields_address = address;
2581 register_value = ipw_read_reg32(priv, cb_fields_address);
2582 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2584 cb_fields_address += sizeof(u32);
2585 register_value = ipw_read_reg32(priv, cb_fields_address);
2586 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2588 cb_fields_address += sizeof(u32);
2589 register_value = ipw_read_reg32(priv, cb_fields_address);
2590 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2591 register_value);
2593 cb_fields_address += sizeof(u32);
2594 register_value = ipw_read_reg32(priv, cb_fields_address);
2595 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2597 IPW_DEBUG_FW(">> :\n");
2600 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2602 u32 current_cb_address = 0;
2603 u32 current_cb_index = 0;
2605 IPW_DEBUG_FW("<< :\n");
2606 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2608 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2609 sizeof(struct command_block);
2611 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2612 current_cb_index, current_cb_address);
2614 IPW_DEBUG_FW(">> :\n");
2615 return current_cb_index;
2619 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2620 u32 src_address,
2621 u32 dest_address,
2622 u32 length,
2623 int interrupt_enabled, int is_last)
2626 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2627 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2628 CB_DEST_SIZE_LONG;
2629 struct command_block *cb;
2630 u32 last_cb_element = 0;
2632 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2633 src_address, dest_address, length);
2635 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2636 return -1;
2638 last_cb_element = priv->sram_desc.last_cb_index;
2639 cb = &priv->sram_desc.cb_list[last_cb_element];
2640 priv->sram_desc.last_cb_index++;
2642 /* Calculate the new CB control word */
2643 if (interrupt_enabled)
2644 control |= CB_INT_ENABLED;
2646 if (is_last)
2647 control |= CB_LAST_VALID;
2649 control |= length;
2651 /* Calculate the CB Element's checksum value */
2652 cb->status = control ^ src_address ^ dest_address;
2654 /* Copy the Source and Destination addresses */
2655 cb->dest_addr = dest_address;
2656 cb->source_addr = src_address;
2658 /* Copy the Control Word last */
2659 cb->control = control;
2661 return 0;
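/*
 * For reference, the command block assembled above ends up as:
 *
 *	control = CB_VALID | CB_SRC_LE | ... | length;
 *	status  = control ^ src_address ^ dest_address;
 *
 * i.e. the transfer length lives in the low bits of the control word
 * alongside the flag bits, and the status word is seeded with a simple
 * XOR checksum of control, source and destination.  The control word
 * is copied last, presumably so the engine never sees a half-built
 * block with CB_VALID already set.
 */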
2664 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2665 u32 src_phys, u32 dest_address, u32 length)
2667 u32 bytes_left = length;
2668 u32 src_offset = 0;
2669 u32 dest_offset = 0;
2670 int status = 0;
2671 IPW_DEBUG_FW(">> \n");
2672 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2673 src_phys, dest_address, length);
2674 while (bytes_left > CB_MAX_LENGTH) {
2675 status = ipw_fw_dma_add_command_block(priv,
2676 src_phys + src_offset,
2677 dest_address +
2678 dest_offset,
2679 CB_MAX_LENGTH, 0, 0);
2680 if (status) {
2681 IPW_DEBUG_FW_INFO(": Failed\n");
2682 return -1;
2683 } else
2684 IPW_DEBUG_FW_INFO(": Added new cb\n");
2686 src_offset += CB_MAX_LENGTH;
2687 dest_offset += CB_MAX_LENGTH;
2688 bytes_left -= CB_MAX_LENGTH;
2691 /* add the buffer tail */
2692 if (bytes_left > 0) {
2693 status =
2694 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2695 dest_address + dest_offset,
2696 bytes_left, 0, 0);
2697 if (status) {
2698 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2699 return -1;
2700 } else
2701 IPW_DEBUG_FW_INFO
2702 (": Adding new cb - the buffer tail\n");
2705 IPW_DEBUG_FW("<< \n");
2706 return 0;
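/*
 * Worked example of the chunking above: assuming CB_MAX_LENGTH were
 * 0x2000, a 0x2400-byte buffer would be split into one full command
 * block of 0x2000 bytes followed by a 0x400-byte tail block, both with
 * interrupts disabled and neither marked as the last valid CB.
 * ipw_fw_dma_kick() later copies the accumulated blocks into shared
 * SRAM and starts the engine.
 */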
2709 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2711 u32 current_index = 0, previous_index;
2712 u32 watchdog = 0;
2714 IPW_DEBUG_FW(">> : \n");
2716 current_index = ipw_fw_dma_command_block_index(priv);
2717 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2718 (int)priv->sram_desc.last_cb_index);
2720 while (current_index < priv->sram_desc.last_cb_index) {
2721 udelay(50);
2722 previous_index = current_index;
2723 current_index = ipw_fw_dma_command_block_index(priv);
2725 if (previous_index < current_index) {
2726 watchdog = 0;
2727 continue;
2729 if (++watchdog > 400) {
2730 IPW_DEBUG_FW_INFO("Timeout\n");
2731 ipw_fw_dma_dump_command_block(priv);
2732 ipw_fw_dma_abort(priv);
2733 return -1;
2737 ipw_fw_dma_abort(priv);
2739 /*Disable the DMA in the CSR register */
2740 ipw_set_bit(priv, IPW_RESET_REG,
2741 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2743 IPW_DEBUG_FW("<< dmaWaitSync \n");
2744 return 0;
2747 static void ipw_remove_current_network(struct ipw_priv *priv)
2749 struct list_head *element, *safe;
2750 struct ieee80211_network *network = NULL;
2751 unsigned long flags;
2753 spin_lock_irqsave(&priv->ieee->lock, flags);
2754 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2755 network = list_entry(element, struct ieee80211_network, list);
2756 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2757 list_del(element);
2758 list_add_tail(&network->list,
2759 &priv->ieee->network_free_list);
2762 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2766 * Check that card is still alive.
2767 * Reads debug register from domain0.
2768 * If card is present, pre-defined value should
2769 * be found there.
2771 * @param priv
2772 * @return 1 if card is present, 0 otherwise
2774 static inline int ipw_alive(struct ipw_priv *priv)
2776 return ipw_read32(priv, 0x90) == 0xd55555d5;
2779 /* timeout in msec, polled in 10-msec quanta */
2780 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2781 int timeout)
2783 int i = 0;
2785 do {
2786 if ((ipw_read32(priv, addr) & mask) == mask)
2787 return i;
2788 mdelay(10);
2789 i += 10;
2790 } while (i < timeout);
2792 return -ETIME;
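/*
 * Usage note: on success ipw_poll_bit() returns the time that elapsed
 * before the mask matched (in 10ms granularity), so callers can both
 * test for -ETIME and log how long the wait took.  ipw_stop_master()
 * below, for example, polls IPW_RESET_REG_MASTER_DISABLED for up to
 * 100ms and prints the elapsed time on success.
 */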
2795 /* These functions load the firmware and microcode for the operation of
2796 * the ipw hardware.  They assume the buffer holds the complete image and
2797 * that the caller is handling the memory allocation and clean up.
2800 static int ipw_stop_master(struct ipw_priv *priv)
2802 int rc;
2804 IPW_DEBUG_TRACE(">> \n");
2805 /* stop master. typical delay - 0 */
2806 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2808 /* timeout is in msec, polled in 10-msec quanta */
2809 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2810 IPW_RESET_REG_MASTER_DISABLED, 100);
2811 if (rc < 0) {
2812 IPW_ERROR("wait for stop master failed after 100ms\n");
2813 return -1;
2816 IPW_DEBUG_INFO("stop master %dms\n", rc);
2818 return rc;
2821 static void ipw_arc_release(struct ipw_priv *priv)
2823 IPW_DEBUG_TRACE(">> \n");
2824 mdelay(5);
2826 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2828 /* no one knows timing, for safety add some delay */
2829 mdelay(5);
2832 struct fw_chunk {
2833 u32 address;
2834 u32 length;
2837 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2839 int rc = 0, i, addr;
2840 u8 cr = 0;
2841 u16 *image;
2843 image = (u16 *) data;
2845 IPW_DEBUG_TRACE(">> \n");
2847 rc = ipw_stop_master(priv);
2849 if (rc < 0)
2850 return rc;
2852 // spin_lock_irqsave(&priv->lock, flags);
2854 for (addr = IPW_SHARED_LOWER_BOUND;
2855 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2856 ipw_write32(priv, addr, 0);
2859 /* no ucode (yet) */
2860 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2861 /* destroy DMA queues */
2862 /* reset sequence */
2864 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2865 ipw_arc_release(priv);
2866 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2867 mdelay(1);
2869 /* reset PHY */
2870 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2871 mdelay(1);
2873 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2874 mdelay(1);
2876 /* enable ucode store */
2877 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2878 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2879 mdelay(1);
2881 /* write ucode */
2883 * @bug
2884 * Do NOT set indirect address register once and then
2885 * store data to indirect data register in the loop.
2886 * It seems very reasonable, but in this case DINO does not
2887 * accept the ucode.  It is essential to set the address each time.
2889 /* load new ipw uCode */
2890 for (i = 0; i < len / 2; i++)
2891 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2892 cpu_to_le16(image[i]));
2894 /* enable DINO */
2895 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2896 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2898 /* this is where the igx / win driver deviates from the VAP driver. */
2900 /* wait for alive response */
2901 for (i = 0; i < 100; i++) {
2902 /* poll for incoming data */
2903 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2904 if (cr & DINO_RXFIFO_DATA)
2905 break;
2906 mdelay(1);
2909 if (cr & DINO_RXFIFO_DATA) {
2910 /* alive_command_response size is NOT a multiple of 4 */
2911 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2913 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2914 response_buffer[i] =
2915 le32_to_cpu(ipw_read_reg32(priv,
2916 IPW_BASEBAND_RX_FIFO_READ));
2917 memcpy(&priv->dino_alive, response_buffer,
2918 sizeof(priv->dino_alive));
2919 if (priv->dino_alive.alive_command == 1
2920 && priv->dino_alive.ucode_valid == 1) {
2921 rc = 0;
2922 IPW_DEBUG_INFO
2923 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2924 "of %02d/%02d/%02d %02d:%02d\n",
2925 priv->dino_alive.software_revision,
2926 priv->dino_alive.software_revision,
2927 priv->dino_alive.device_identifier,
2928 priv->dino_alive.device_identifier,
2929 priv->dino_alive.time_stamp[0],
2930 priv->dino_alive.time_stamp[1],
2931 priv->dino_alive.time_stamp[2],
2932 priv->dino_alive.time_stamp[3],
2933 priv->dino_alive.time_stamp[4]);
2934 } else {
2935 IPW_DEBUG_INFO("Microcode is not alive\n");
2936 rc = -EINVAL;
2938 } else {
2939 IPW_DEBUG_INFO("No alive response from DINO\n");
2940 rc = -ETIME;
2943 /* disable DINO, otherwise for some reason
2944 the firmware has problems getting the alive response. */
2945 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2947 // spin_unlock_irqrestore(&priv->lock, flags);
2949 return rc;
2952 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2954 int rc = -1;
2955 int offset = 0;
2956 struct fw_chunk *chunk;
2957 dma_addr_t shared_phys;
2958 u8 *shared_virt;
2960 IPW_DEBUG_TRACE("<< : \n");
2961 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2963 if (!shared_virt)
2964 return -ENOMEM;
2966 memmove(shared_virt, data, len);
2968 /* Start the Dma */
2969 rc = ipw_fw_dma_enable(priv);
2971 if (priv->sram_desc.last_cb_index > 0) {
2972 /* the DMA is already set up; this would be a bug. */
2973 BUG();
2974 goto out;
2977 do {
2978 chunk = (struct fw_chunk *)(data + offset);
2979 offset += sizeof(struct fw_chunk);
2980 /* build DMA packet and queue up for sending */
2981 /* dma to chunk->address, the chunk->length bytes from data +
2982 * offset */
2983 /* Dma loading */
2984 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2985 le32_to_cpu(chunk->address),
2986 le32_to_cpu(chunk->length));
2987 if (rc) {
2988 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2989 goto out;
2992 offset += le32_to_cpu(chunk->length);
2993 } while (offset < len);
2995 /* Run the DMA and wait for the answer */
2996 rc = ipw_fw_dma_kick(priv);
2997 if (rc) {
2998 IPW_ERROR("dmaKick Failed\n");
2999 goto out;
3002 rc = ipw_fw_dma_wait(priv);
3003 if (rc) {
3004 IPW_ERROR("dmaWaitSync Failed\n");
3005 goto out;
3007 out:
3008 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3009 return rc;
3012 /* stop nic */
3013 static int ipw_stop_nic(struct ipw_priv *priv)
3015 int rc = 0;
3017 /* stop */
3018 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3020 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3021 IPW_RESET_REG_MASTER_DISABLED, 500);
3022 if (rc < 0) {
3023 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3024 return rc;
3027 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3029 return rc;
3032 static void ipw_start_nic(struct ipw_priv *priv)
3034 IPW_DEBUG_TRACE(">>\n");
3036 /* prvHwStartNic release ARC */
3037 ipw_clear_bit(priv, IPW_RESET_REG,
3038 IPW_RESET_REG_MASTER_DISABLED |
3039 IPW_RESET_REG_STOP_MASTER |
3040 CBD_RESET_REG_PRINCETON_RESET);
3042 /* enable power management */
3043 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3044 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3046 IPW_DEBUG_TRACE("<<\n");
3049 static int ipw_init_nic(struct ipw_priv *priv)
3051 int rc;
3053 IPW_DEBUG_TRACE(">>\n");
3054 /* reset */
3055 /*prvHwInitNic */
3056 /* set "initialization complete" bit to move adapter to D0 state */
3057 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3059 /* low-level PLL activation */
3060 ipw_write32(priv, IPW_READ_INT_REGISTER,
3061 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3063 /* wait for clock stabilization */
3064 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3065 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3066 if (rc < 0)
3067 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3069 /* assert SW reset */
3070 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3072 udelay(10);
3074 /* set "initialization complete" bit to move adapter to D0 state */
3075 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3077 IPW_DEBUG_TRACE(">>\n");
3078 return 0;
3081 /* Call this function from process context, it will sleep in request_firmware.
3082 * Probe is an ok place to call this from.
3084 static int ipw_reset_nic(struct ipw_priv *priv)
3086 int rc = 0;
3087 unsigned long flags;
3089 IPW_DEBUG_TRACE(">>\n");
3091 rc = ipw_init_nic(priv);
3093 spin_lock_irqsave(&priv->lock, flags);
3094 /* Clear the 'host command active' bit... */
3095 priv->status &= ~STATUS_HCMD_ACTIVE;
3096 wake_up_interruptible(&priv->wait_command_queue);
3097 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3098 wake_up_interruptible(&priv->wait_state);
3099 spin_unlock_irqrestore(&priv->lock, flags);
3101 IPW_DEBUG_TRACE("<<\n");
3102 return rc;
3106 struct ipw_fw {
3107 u32 ver;
3108 u32 boot_size;
3109 u32 ucode_size;
3110 u32 fw_size;
3111 u8 data[0];
3114 static int ipw_get_fw(struct ipw_priv *priv,
3115 const struct firmware **raw, const char *name)
3117 struct ipw_fw *fw;
3118 int rc;
3120 /* ask firmware_class module to get the boot firmware off disk */
3121 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3122 if (rc < 0) {
3123 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3124 return rc;
3127 if ((*raw)->size < sizeof(*fw)) {
3128 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3129 return -EINVAL;
3132 fw = (void *)(*raw)->data;
3134 if ((*raw)->size < sizeof(*fw) +
3135 fw->boot_size + fw->ucode_size + fw->fw_size) {
3136 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3137 name, (*raw)->size);
3138 return -EINVAL;
3141 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3142 name,
3143 le32_to_cpu(fw->ver) >> 16,
3144 le32_to_cpu(fw->ver) & 0xff,
3145 (*raw)->size - sizeof(*fw));
3146 return 0;
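/*
 * The on-disk image validated above is laid out as the struct ipw_fw
 * header followed by three concatenated blobs, which ipw_load() later
 * picks apart by offset:
 *
 *	boot_img  = &fw->data[0];
 *	ucode_img = &fw->data[fw->boot_size];
 *	fw_img    = &fw->data[fw->boot_size + fw->ucode_size];
 *
 * hence the size check against sizeof(*fw) + boot_size + ucode_size +
 * fw_size.
 */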
3149 #define IPW_RX_BUF_SIZE (3000)
3151 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3152 struct ipw_rx_queue *rxq)
3154 unsigned long flags;
3155 int i;
3157 spin_lock_irqsave(&rxq->lock, flags);
3159 INIT_LIST_HEAD(&rxq->rx_free);
3160 INIT_LIST_HEAD(&rxq->rx_used);
3162 /* Fill the rx_used queue with _all_ of the Rx buffers */
3163 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3164 /* In the reset function, these buffers may have been allocated
3165 * to an SKB, so we need to unmap and free potential storage */
3166 if (rxq->pool[i].skb != NULL) {
3167 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3168 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3169 dev_kfree_skb(rxq->pool[i].skb);
3170 rxq->pool[i].skb = NULL;
3172 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3175 /* Set us so that we have processed and used all buffers, but have
3176 * not restocked the Rx queue with fresh buffers */
3177 rxq->read = rxq->write = 0;
3178 rxq->processed = RX_QUEUE_SIZE - 1;
3179 rxq->free_count = 0;
3180 spin_unlock_irqrestore(&rxq->lock, flags);
3183 #ifdef CONFIG_PM
3184 static int fw_loaded = 0;
3185 static const struct firmware *raw = NULL;
3187 static void free_firmware(void)
3189 if (fw_loaded) {
3190 release_firmware(raw);
3191 raw = NULL;
3192 fw_loaded = 0;
3195 #else
3196 #define free_firmware() do {} while (0)
3197 #endif
3199 static int ipw_load(struct ipw_priv *priv)
3201 #ifndef CONFIG_PM
3202 const struct firmware *raw = NULL;
3203 #endif
3204 struct ipw_fw *fw;
3205 u8 *boot_img, *ucode_img, *fw_img;
3206 u8 *name = NULL;
3207 int rc = 0, retries = 3;
3209 switch (priv->ieee->iw_mode) {
3210 case IW_MODE_ADHOC:
3211 name = "ipw2200-ibss.fw";
3212 break;
3213 #ifdef CONFIG_IPW2200_MONITOR
3214 case IW_MODE_MONITOR:
3215 name = "ipw2200-sniffer.fw";
3216 break;
3217 #endif
3218 case IW_MODE_INFRA:
3219 name = "ipw2200-bss.fw";
3220 break;
3223 if (!name) {
3224 rc = -EINVAL;
3225 goto error;
3228 #ifdef CONFIG_PM
3229 if (!fw_loaded) {
3230 #endif
3231 rc = ipw_get_fw(priv, &raw, name);
3232 if (rc < 0)
3233 goto error;
3234 #ifdef CONFIG_PM
3236 #endif
3238 fw = (void *)raw->data;
3239 boot_img = &fw->data[0];
3240 ucode_img = &fw->data[fw->boot_size];
3241 fw_img = &fw->data[fw->boot_size + fw->ucode_size];
3243 if (rc < 0)
3244 goto error;
3246 if (!priv->rxq)
3247 priv->rxq = ipw_rx_queue_alloc(priv);
3248 else
3249 ipw_rx_queue_reset(priv, priv->rxq);
3250 if (!priv->rxq) {
3251 IPW_ERROR("Unable to initialize Rx queue\n");
3252 goto error;
3255 retry:
3256 /* Ensure interrupts are disabled */
3257 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3258 priv->status &= ~STATUS_INT_ENABLED;
3260 /* ack pending interrupts */
3261 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3263 ipw_stop_nic(priv);
3265 rc = ipw_reset_nic(priv);
3266 if (rc < 0) {
3267 IPW_ERROR("Unable to reset NIC\n");
3268 goto error;
3271 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3272 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3274 /* DMA the initial boot firmware into the device */
3275 rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
3276 if (rc < 0) {
3277 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3278 goto error;
3281 /* kick start the device */
3282 ipw_start_nic(priv);
3284 /* wait for the device to finish its initial startup sequence */
3285 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3286 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3287 if (rc < 0) {
3288 IPW_ERROR("device failed to boot initial fw image\n");
3289 goto error;
3291 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3293 /* ack fw init done interrupt */
3294 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3296 /* DMA the ucode into the device */
3297 rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
3298 if (rc < 0) {
3299 IPW_ERROR("Unable to load ucode: %d\n", rc);
3300 goto error;
3303 /* stop nic */
3304 ipw_stop_nic(priv);
3306 /* DMA bss firmware into the device */
3307 rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
3308 if (rc < 0) {
3309 IPW_ERROR("Unable to load firmware: %d\n", rc);
3310 goto error;
3312 #ifdef CONFIG_PM
3313 fw_loaded = 1;
3314 #endif
3316 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3318 rc = ipw_queue_reset(priv);
3319 if (rc < 0) {
3320 IPW_ERROR("Unable to initialize queues\n");
3321 goto error;
3324 /* Ensure interrupts are disabled */
3325 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3326 /* ack pending interrupts */
3327 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3329 /* kick start the device */
3330 ipw_start_nic(priv);
3332 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3333 if (retries > 0) {
3334 IPW_WARNING("Parity error. Retrying init.\n");
3335 retries--;
3336 goto retry;
3339 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3340 rc = -EIO;
3341 goto error;
3344 /* wait for the device */
3345 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3346 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3347 if (rc < 0) {
3348 IPW_ERROR("device failed to start within 500ms\n");
3349 goto error;
3351 IPW_DEBUG_INFO("device response after %dms\n", rc);
3353 /* ack fw init done interrupt */
3354 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3356 /* read eeprom data and initialize the eeprom region of sram */
3357 priv->eeprom_delay = 1;
3358 ipw_eeprom_init_sram(priv);
3360 /* enable interrupts */
3361 ipw_enable_interrupts(priv);
3363 /* Ensure our queue has valid packets */
3364 ipw_rx_queue_replenish(priv);
3366 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3368 /* ack pending interrupts */
3369 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3371 #ifndef CONFIG_PM
3372 release_firmware(raw);
3373 #endif
3374 return 0;
3376 error:
3377 if (priv->rxq) {
3378 ipw_rx_queue_free(priv, priv->rxq);
3379 priv->rxq = NULL;
3381 ipw_tx_queue_free(priv);
3382 if (raw)
3383 release_firmware(raw);
3384 #ifdef CONFIG_PM
3385 fw_loaded = 0;
3386 raw = NULL;
3387 #endif
3389 return rc;
3393 * DMA services
3395 * Theory of operation
3397 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3398 * 2 empty entries are always kept in the buffer to protect from overflow.
3400 * For the Tx queues there are low mark and high mark limits.  If, after
3401 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3402 * is stopped.  When packets are reclaimed (on the 'Tx done' IRQ) and the
3403 * free space becomes > high mark, the Tx queue is resumed.
3405 * The IPW operates with six queues, one receive queue in the device's
3406 * sram, one transmit queue for sending commands to the device firmware,
3407 * and four transmit queues for data.
3409 * The four transmit queues allow for performing quality of service (qos)
3410 * transmissions as per the 802.11 protocol. Currently Linux does not
3411 * provide a mechanism to the user for utilizing prioritized queues, so
3412 * we only utilize the first data transmit queue (queue1).
3416 * Driver allocates buffers of this size for Rx
3419 static inline int ipw_queue_space(const struct clx2_queue *q)
3421 int s = q->last_used - q->first_empty;
3422 if (s <= 0)
3423 s += q->n_bd;
3424 s -= 2; /* keep some reserve to not confuse empty and full situations */
3425 if (s < 0)
3426 s = 0;
3427 return s;
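/*
 * Worked example of the free-space math above: with n_bd = 64,
 * first_empty = 10 and last_used = 4, s starts at 4 - 10 = -6, wraps
 * to -6 + 64 = 58 and then loses the 2-entry reserve, leaving 56
 * usable slots.  The reserve keeps a completely full ring
 * distinguishable from a completely empty one.
 */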
3430 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3432 return (++index == n_bd) ? 0 : index;
3436 * Initialize common DMA queue structure
3438 * @param q queue to init
3439 * @param count Number of BD's to allocate. Should be power of 2
3440 * @param read_register Address for 'read' register
3441 * (not offset within BAR, full address)
3442 * @param write_register Address for 'write' register
3443 * (not offset within BAR, full address)
3444 * @param base_register Address for 'base' register
3445 * (not offset within BAR, full address)
3446 * @param size Address for 'size' register
3447 * (not offset within BAR, full address)
3449 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3450 int count, u32 read, u32 write, u32 base, u32 size)
3452 q->n_bd = count;
3454 q->low_mark = q->n_bd / 4;
3455 if (q->low_mark < 4)
3456 q->low_mark = 4;
3458 q->high_mark = q->n_bd / 8;
3459 if (q->high_mark < 2)
3460 q->high_mark = 2;
3462 q->first_empty = q->last_used = 0;
3463 q->reg_r = read;
3464 q->reg_w = write;
3466 ipw_write32(priv, base, q->dma_addr);
3467 ipw_write32(priv, size, count);
3468 ipw_write32(priv, read, 0);
3469 ipw_write32(priv, write, 0);
3471 _ipw_read32(priv, 0x90);
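/*
 * Example of the watermarks computed above: a 64-entry queue gets
 * low_mark = 16 and high_mark = 8, while a small 8-entry queue is
 * clamped to the minimums of 4 and 2 respectively.
 */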
3474 static int ipw_queue_tx_init(struct ipw_priv *priv,
3475 struct clx2_tx_queue *q,
3476 int count, u32 read, u32 write, u32 base, u32 size)
3478 struct pci_dev *dev = priv->pci_dev;
3480 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3481 if (!q->txb) {
3482 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3483 return -ENOMEM;
3486 q->bd =
3487 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3488 if (!q->bd) {
3489 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3490 sizeof(q->bd[0]) * count);
3491 kfree(q->txb);
3492 q->txb = NULL;
3493 return -ENOMEM;
3496 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3497 return 0;
3501 * Free one TFD, the one at index [txq->q.last_used].
3502 * Does NOT advance any indexes.
3504 * @param dev
3505 * @param txq
3507 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3508 struct clx2_tx_queue *txq)
3510 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3511 struct pci_dev *dev = priv->pci_dev;
3512 int i;
3514 /* classify bd */
3515 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3516 /* nothing to clean up after for host commands */
3517 return;
3519 /* sanity check */
3520 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3521 IPW_ERROR("Too many chunks: %i\n",
3522 le32_to_cpu(bd->u.data.num_chunks));
3523 /** @todo issue a fatal error, it is quite a serious situation */
3524 return;
3527 /* unmap chunks if any */
3528 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3529 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3530 le16_to_cpu(bd->u.data.chunk_len[i]),
3531 PCI_DMA_TODEVICE);
3532 if (txq->txb[txq->q.last_used]) {
3533 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3534 txq->txb[txq->q.last_used] = NULL;
3540 * Deallocate DMA queue.
3542 * Empty queue by removing and destroying all BD's.
3543 * Free all buffers.
3545 * @param dev
3546 * @param q
3548 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3550 struct clx2_queue *q = &txq->q;
3551 struct pci_dev *dev = priv->pci_dev;
3553 if (q->n_bd == 0)
3554 return;
3556 /* first, empty all BD's */
3557 for (; q->first_empty != q->last_used;
3558 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3559 ipw_queue_tx_free_tfd(priv, txq);
3562 /* free buffers belonging to queue itself */
3563 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3564 q->dma_addr);
3565 kfree(txq->txb);
3567 /* 0 fill whole structure */
3568 memset(txq, 0, sizeof(*txq));
3572 * Destroy all DMA queues and structures
3574 * @param priv
3576 static void ipw_tx_queue_free(struct ipw_priv *priv)
3578 /* Tx CMD queue */
3579 ipw_queue_tx_free(priv, &priv->txq_cmd);
3581 /* Tx queues */
3582 ipw_queue_tx_free(priv, &priv->txq[0]);
3583 ipw_queue_tx_free(priv, &priv->txq[1]);
3584 ipw_queue_tx_free(priv, &priv->txq[2]);
3585 ipw_queue_tx_free(priv, &priv->txq[3]);
3588 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3590 /* First 3 bytes are manufacturer */
3591 bssid[0] = priv->mac_addr[0];
3592 bssid[1] = priv->mac_addr[1];
3593 bssid[2] = priv->mac_addr[2];
3595 /* Last bytes are random */
3596 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3598 bssid[0] &= 0xfe; /* clear multicast bit */
3599 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
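/*
 * The resulting BSSID keeps the adapter's OUI but is flagged as a
 * locally administered, unicast address: clearing bit 0 of the first
 * octet rules out a multicast/group address and setting bit 1 marks
 * it as locally assigned, so e.g. a MAC starting 00:0e:35 yields a
 * BSSID starting 02:0e:35 followed by three random octets.
 */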
3602 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3604 struct ipw_station_entry entry;
3605 int i;
3607 for (i = 0; i < priv->num_stations; i++) {
3608 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3609 /* Another node is active in network */
3610 priv->missed_adhoc_beacons = 0;
3611 if (!(priv->config & CFG_STATIC_CHANNEL))
3612 /* when other nodes drop out, we drop out */
3613 priv->config &= ~CFG_ADHOC_PERSIST;
3615 return i;
3619 if (i == MAX_STATIONS)
3620 return IPW_INVALID_STATION;
3622 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3624 entry.reserved = 0;
3625 entry.support_mode = 0;
3626 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3627 memcpy(priv->stations[i], bssid, ETH_ALEN);
3628 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3629 &entry, sizeof(entry));
3630 priv->num_stations++;
3632 return i;
3635 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3637 int i;
3639 for (i = 0; i < priv->num_stations; i++)
3640 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3641 return i;
3643 return IPW_INVALID_STATION;
3646 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3648 int err;
3650 if (priv->status & STATUS_ASSOCIATING) {
3651 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3652 queue_work(priv->workqueue, &priv->disassociate);
3653 return;
3656 if (!(priv->status & STATUS_ASSOCIATED)) {
3657 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3658 return;
3661 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
3662 "on channel %d.\n",
3663 MAC_ARG(priv->assoc_request.bssid),
3664 priv->assoc_request.channel);
3666 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3667 priv->status |= STATUS_DISASSOCIATING;
3669 if (quiet)
3670 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3671 else
3672 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3674 err = ipw_send_associate(priv, &priv->assoc_request);
3675 if (err) {
3676 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3677 "failed.\n");
3678 return;
3683 static int ipw_disassociate(void *data)
3685 struct ipw_priv *priv = data;
3686 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3687 return 0;
3688 ipw_send_disassociate(data, 0);
3689 return 1;
3692 static void ipw_bg_disassociate(void *data)
3694 struct ipw_priv *priv = data;
3695 mutex_lock(&priv->mutex);
3696 ipw_disassociate(data);
3697 mutex_unlock(&priv->mutex);
3700 static void ipw_system_config(void *data)
3702 struct ipw_priv *priv = data;
3703 ipw_send_system_config(priv, &priv->sys_config);
3706 struct ipw_status_code {
3707 u16 status;
3708 const char *reason;
3711 static const struct ipw_status_code ipw_status_codes[] = {
3712 {0x00, "Successful"},
3713 {0x01, "Unspecified failure"},
3714 {0x0A, "Cannot support all requested capabilities in the "
3715 "Capability information field"},
3716 {0x0B, "Reassociation denied due to inability to confirm that "
3717 "association exists"},
3718 {0x0C, "Association denied due to reason outside the scope of this "
3719 "standard"},
3720 {0x0D,
3721 "Responding station does not support the specified authentication "
3722 "algorithm"},
3723 {0x0E,
3724 "Received an Authentication frame with authentication sequence "
3725 "transaction sequence number out of expected sequence"},
3726 {0x0F, "Authentication rejected because of challenge failure"},
3727 {0x10, "Authentication rejected due to timeout waiting for next "
3728 "frame in sequence"},
3729 {0x11, "Association denied because AP is unable to handle additional "
3730 "associated stations"},
3731 {0x12,
3732 "Association denied due to requesting station not supporting all "
3733 "of the datarates in the BSSBasicServiceSet Parameter"},
3734 {0x13,
3735 "Association denied due to requesting station not supporting "
3736 "short preamble operation"},
3737 {0x14,
3738 "Association denied due to requesting station not supporting "
3739 "PBCC encoding"},
3740 {0x15,
3741 "Association denied due to requesting station not supporting "
3742 "channel agility"},
3743 {0x19,
3744 "Association denied due to requesting station not supporting "
3745 "short slot operation"},
3746 {0x1A,
3747 "Association denied due to requesting station not supporting "
3748 "DSSS-OFDM operation"},
3749 {0x28, "Invalid Information Element"},
3750 {0x29, "Group Cipher is not valid"},
3751 {0x2A, "Pairwise Cipher is not valid"},
3752 {0x2B, "AKMP is not valid"},
3753 {0x2C, "Unsupported RSN IE version"},
3754 {0x2D, "Invalid RSN IE Capabilities"},
3755 {0x2E, "Cipher suite is rejected per security policy"},
3758 #ifdef CONFIG_IPW2200_DEBUG
3759 static const char *ipw_get_status_code(u16 status)
3761 int i;
3762 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3763 if (ipw_status_codes[i].status == (status & 0xff))
3764 return ipw_status_codes[i].reason;
3765 return "Unknown status value.";
3767 #endif
3769 static inline void average_init(struct average *avg)
3771 memset(avg, 0, sizeof(*avg));
3774 static void average_add(struct average *avg, s16 val)
3776 avg->sum -= avg->entries[avg->pos];
3777 avg->sum += val;
3778 avg->entries[avg->pos++] = val;
3779 if (unlikely(avg->pos == AVG_ENTRIES)) {
3780 avg->init = 1;
3781 avg->pos = 0;
3785 static s16 average_value(struct average *avg)
3787 if (unlikely(!avg->init)) {
3788 if (avg->pos)
3789 return avg->sum / avg->pos;
3790 return 0;
3793 return avg->sum / AVG_ENTRIES;
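/*
 * The two routines above implement a simple sliding-window mean over
 * the last AVG_ENTRIES samples: average_add() subtracts the value
 * being overwritten from the running sum before adding the new one,
 * and average_value() falls back to averaging only the samples seen
 * so far until the ring has wrapped once (avg->init set).
 */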
3796 static void ipw_reset_stats(struct ipw_priv *priv)
3798 u32 len = sizeof(u32);
3800 priv->quality = 0;
3802 average_init(&priv->average_missed_beacons);
3803 average_init(&priv->average_rssi);
3804 average_init(&priv->average_noise);
3806 priv->last_rate = 0;
3807 priv->last_missed_beacons = 0;
3808 priv->last_rx_packets = 0;
3809 priv->last_tx_packets = 0;
3810 priv->last_tx_failures = 0;
3812 /* Firmware managed, reset only when NIC is restarted, so we have to
3813 * normalize on the current value */
3814 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3815 &priv->last_rx_err, &len);
3816 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3817 &priv->last_tx_failures, &len);
3819 /* Driver managed, reset with each association */
3820 priv->missed_adhoc_beacons = 0;
3821 priv->missed_beacons = 0;
3822 priv->tx_packets = 0;
3823 priv->rx_packets = 0;
3827 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3829 u32 i = 0x80000000;
3830 u32 mask = priv->rates_mask;
3831 /* If currently associated in B mode, restrict the maximum
3832 * rate match to B rates */
3833 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3834 mask &= IEEE80211_CCK_RATES_MASK;
3836 /* TODO: Verify that the rate is supported by the current rates
3837 * list. */
3839 while (i && !(mask & i))
3840 i >>= 1;
3841 switch (i) {
3842 case IEEE80211_CCK_RATE_1MB_MASK:
3843 return 1000000;
3844 case IEEE80211_CCK_RATE_2MB_MASK:
3845 return 2000000;
3846 case IEEE80211_CCK_RATE_5MB_MASK:
3847 return 5500000;
3848 case IEEE80211_OFDM_RATE_6MB_MASK:
3849 return 6000000;
3850 case IEEE80211_OFDM_RATE_9MB_MASK:
3851 return 9000000;
3852 case IEEE80211_CCK_RATE_11MB_MASK:
3853 return 11000000;
3854 case IEEE80211_OFDM_RATE_12MB_MASK:
3855 return 12000000;
3856 case IEEE80211_OFDM_RATE_18MB_MASK:
3857 return 18000000;
3858 case IEEE80211_OFDM_RATE_24MB_MASK:
3859 return 24000000;
3860 case IEEE80211_OFDM_RATE_36MB_MASK:
3861 return 36000000;
3862 case IEEE80211_OFDM_RATE_48MB_MASK:
3863 return 48000000;
3864 case IEEE80211_OFDM_RATE_54MB_MASK:
3865 return 54000000;
3868 if (priv->ieee->mode == IEEE_B)
3869 return 11000000;
3870 else
3871 return 54000000;
3874 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3876 u32 rate, len = sizeof(rate);
3877 int err;
3879 if (!(priv->status & STATUS_ASSOCIATED))
3880 return 0;
3882 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3883 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3884 &len);
3885 if (err) {
3886 IPW_DEBUG_INFO("failed querying ordinals.\n");
3887 return 0;
3889 } else
3890 return ipw_get_max_rate(priv);
3892 switch (rate) {
3893 case IPW_TX_RATE_1MB:
3894 return 1000000;
3895 case IPW_TX_RATE_2MB:
3896 return 2000000;
3897 case IPW_TX_RATE_5MB:
3898 return 5500000;
3899 case IPW_TX_RATE_6MB:
3900 return 6000000;
3901 case IPW_TX_RATE_9MB:
3902 return 9000000;
3903 case IPW_TX_RATE_11MB:
3904 return 11000000;
3905 case IPW_TX_RATE_12MB:
3906 return 12000000;
3907 case IPW_TX_RATE_18MB:
3908 return 18000000;
3909 case IPW_TX_RATE_24MB:
3910 return 24000000;
3911 case IPW_TX_RATE_36MB:
3912 return 36000000;
3913 case IPW_TX_RATE_48MB:
3914 return 48000000;
3915 case IPW_TX_RATE_54MB:
3916 return 54000000;
3919 return 0;
3922 #define IPW_STATS_INTERVAL (2 * HZ)
3923 static void ipw_gather_stats(struct ipw_priv *priv)
3925 u32 rx_err, rx_err_delta, rx_packets_delta;
3926 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3927 u32 missed_beacons_percent, missed_beacons_delta;
3928 u32 quality = 0;
3929 u32 len = sizeof(u32);
3930 s16 rssi;
3931 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3932 rate_quality;
3933 u32 max_rate;
3935 if (!(priv->status & STATUS_ASSOCIATED)) {
3936 priv->quality = 0;
3937 return;
3940 /* Update the statistics */
3941 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3942 &priv->missed_beacons, &len);
3943 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3944 priv->last_missed_beacons = priv->missed_beacons;
3945 if (priv->assoc_request.beacon_interval) {
3946 missed_beacons_percent = missed_beacons_delta *
3947 (HZ * priv->assoc_request.beacon_interval) /
3948 (IPW_STATS_INTERVAL * 10);
3949 } else {
3950 missed_beacons_percent = 0;
3952 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3954 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3955 rx_err_delta = rx_err - priv->last_rx_err;
3956 priv->last_rx_err = rx_err;
3958 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3959 tx_failures_delta = tx_failures - priv->last_tx_failures;
3960 priv->last_tx_failures = tx_failures;
3962 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3963 priv->last_rx_packets = priv->rx_packets;
3965 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3966 priv->last_tx_packets = priv->tx_packets;
3968 /* Calculate quality based on the following:
3970 * Missed beacon: 100% = 0, 0% = 70% missed
3971 * Rate: 60% = 1Mbs, 100% = Max
3972 * Rx and Tx errors represent a straight % of total Rx/Tx
3973 * RSSI: 100% = > -50, 0% = < -80
3974 * Rx errors: 100% = 0, 0% = 50% missed
3976 * The lowest computed quality is used.
3979 #define BEACON_THRESHOLD 5
3980 beacon_quality = 100 - missed_beacons_percent;
3981 if (beacon_quality < BEACON_THRESHOLD)
3982 beacon_quality = 0;
3983 else
3984 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3985 (100 - BEACON_THRESHOLD);
3986 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3987 beacon_quality, missed_beacons_percent);
3989 priv->last_rate = ipw_get_current_rate(priv);
3990 max_rate = ipw_get_max_rate(priv);
3991 rate_quality = priv->last_rate * 40 / max_rate + 60;
3992 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3993 rate_quality, priv->last_rate / 1000000);
3995 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3996 rx_quality = 100 - (rx_err_delta * 100) /
3997 (rx_packets_delta + rx_err_delta);
3998 else
3999 rx_quality = 100;
4000 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4001 rx_quality, rx_err_delta, rx_packets_delta);
4003 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4004 tx_quality = 100 - (tx_failures_delta * 100) /
4005 (tx_packets_delta + tx_failures_delta);
4006 else
4007 tx_quality = 100;
4008 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4009 tx_quality, tx_failures_delta, tx_packets_delta);
4011 rssi = average_value(&priv->average_rssi);
4012 signal_quality =
4013 (100 *
4014 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4015 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4016 (priv->ieee->perfect_rssi - rssi) *
4017 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4018 62 * (priv->ieee->perfect_rssi - rssi))) /
4019 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4020 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4021 if (signal_quality > 100)
4022 signal_quality = 100;
4023 else if (signal_quality < 1)
4024 signal_quality = 0;
4026 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4027 signal_quality, rssi);
4029 quality = min(beacon_quality,
4030 min(rate_quality,
4031 min(tx_quality, min(rx_quality, signal_quality))));
4032 if (quality == beacon_quality)
4033 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4034 quality);
4035 if (quality == rate_quality)
4036 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4037 quality);
4038 if (quality == tx_quality)
4039 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4040 quality);
4041 if (quality == rx_quality)
4042 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4043 quality);
4044 if (quality == signal_quality)
4045 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4046 quality);
4048 priv->quality = quality;
4050 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4051 IPW_STATS_INTERVAL);
4054 static void ipw_bg_gather_stats(void *data)
4056 struct ipw_priv *priv = data;
4057 mutex_lock(&priv->mutex);
4058 ipw_gather_stats(data);
4059 mutex_unlock(&priv->mutex);
4062 /* Missed beacon behavior:
4063 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4064 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4065 * Above disassociate threshold, give up and stop scanning.
4066 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
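/* Illustrative sketch (not part of the driver): the decision ladder
 * described above, written out as a pure function.  The threshold names
 * mirror the fields used by ipw_handle_missed_beacon() below; the global
 * 'roaming' module parameter check is left out for brevity. */
#if 0
enum missed_beacon_action { MB_WAIT, MB_ROAM, MB_DISASSOCIATE };

static enum missed_beacon_action
classify_missed_beacons(int missed, int roaming_threshold,
			int disassociate_threshold)
{
	if (missed > disassociate_threshold)
		return MB_DISASSOCIATE;	/* give up and stop scanning */
	if (missed > roaming_threshold &&
	    disassociate_threshold > roaming_threshold)
		return MB_ROAM;		/* scan and roam for a better signal */
	return MB_WAIT;			/* just wait; no scan/roam yet */
}
#endif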
4067 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4068 int missed_count)
4070 priv->notif_missed_beacons = missed_count;
4072 if (missed_count > priv->disassociate_threshold &&
4073 priv->status & STATUS_ASSOCIATED) {
4074 /* If associated and we've hit the missed
4075 * beacon threshold, disassociate, turn
4076 * off roaming, and abort any active scans */
4077 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4078 IPW_DL_STATE | IPW_DL_ASSOC,
4079 "Missed beacon: %d - disassociate\n", missed_count);
4080 priv->status &= ~STATUS_ROAMING;
4081 if (priv->status & STATUS_SCANNING) {
4082 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4083 IPW_DL_STATE,
4084 "Aborting scan with missed beacon.\n");
4085 queue_work(priv->workqueue, &priv->abort_scan);
4088 queue_work(priv->workqueue, &priv->disassociate);
4089 return;
4092 if (priv->status & STATUS_ROAMING) {
4093 /* If we are currently roaming, then just
4094 * print a debug statement... */
4095 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4096 "Missed beacon: %d - roam in progress\n",
4097 missed_count);
4098 return;
4101 if (roaming &&
4102 (missed_count > priv->roaming_threshold &&
4103 missed_count <= priv->disassociate_threshold)) {
4104 /* If we are not already roaming, set the ROAM
4105 * bit in the status and kick off a scan.
4106 * This can happen several times before we reach
4107 * disassociate_threshold. */
4108 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4109 "Missed beacon: %d - initiate "
4110 "roaming\n", missed_count);
4111 if (!(priv->status & STATUS_ROAMING)) {
4112 priv->status |= STATUS_ROAMING;
4113 if (!(priv->status & STATUS_SCANNING))
4114 queue_work(priv->workqueue,
4115 &priv->request_scan);
4117 return;
4120 if (priv->status & STATUS_SCANNING) {
4121 /* Stop scan to keep fw from getting
4122 * stuck (only if we aren't roaming --
4123 * otherwise we'll never scan more than 2 or 3
4124 * channels..) */
4125 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4126 "Aborting scan with missed beacon.\n");
4127 queue_work(priv->workqueue, &priv->abort_scan);
4130 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4134 * Handle host notification packet.
4135 * Called from interrupt routine
4137 static void ipw_rx_notification(struct ipw_priv *priv,
4138 struct ipw_rx_notification *notif)
4140 notif->size = le16_to_cpu(notif->size);
4142 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4144 switch (notif->subtype) {
4145 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4146 struct notif_association *assoc = &notif->u.assoc;
4148 switch (assoc->state) {
4149 case CMAS_ASSOCIATED:{
4150 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4151 IPW_DL_ASSOC,
4152 "associated: '%s' " MAC_FMT
4153 " \n",
4154 escape_essid(priv->essid,
4155 priv->essid_len),
4156 MAC_ARG(priv->bssid));
4158 switch (priv->ieee->iw_mode) {
4159 case IW_MODE_INFRA:
4160 memcpy(priv->ieee->bssid,
4161 priv->bssid, ETH_ALEN);
4162 break;
4164 case IW_MODE_ADHOC:
4165 memcpy(priv->ieee->bssid,
4166 priv->bssid, ETH_ALEN);
4168 /* clear out the station table */
4169 priv->num_stations = 0;
4171 IPW_DEBUG_ASSOC
4172 ("queueing adhoc check\n");
4173 queue_delayed_work(priv->
4174 workqueue,
4175 &priv->
4176 adhoc_check,
4177 priv->
4178 assoc_request.
4179 beacon_interval);
4180 break;
4183 priv->status &= ~STATUS_ASSOCIATING;
4184 priv->status |= STATUS_ASSOCIATED;
4185 queue_work(priv->workqueue,
4186 &priv->system_config);
4188 #ifdef CONFIG_IPW_QOS
4189 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4190 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4191 if ((priv->status & STATUS_AUTH) &&
4192 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4193 == IEEE80211_STYPE_ASSOC_RESP)) {
4194 if ((sizeof
4195 (struct
4196 ieee80211_assoc_response)
4197 <= notif->size)
4198 && (notif->size <= 2314)) {
4199 struct
4200 ieee80211_rx_stats
4201 stats = {
4202 .len =
4203 notif->
4204 size - 1,
4207 IPW_DEBUG_QOS
4208 ("QoS Associate "
4209 "size %d\n",
4210 notif->size);
4211 ieee80211_rx_mgt(priv->
4212 ieee,
4213 (struct
4214 ieee80211_hdr_4addr
4216 &notif->u.raw, &stats);
4219 #endif
4221 schedule_work(&priv->link_up);
4223 break;
4226 case CMAS_AUTHENTICATED:{
4227 if (priv->
4228 status & (STATUS_ASSOCIATED |
4229 STATUS_AUTH)) {
4230 #ifdef CONFIG_IPW2200_DEBUG
4231 struct notif_authenticate *auth
4232 = &notif->u.auth;
4233 IPW_DEBUG(IPW_DL_NOTIF |
4234 IPW_DL_STATE |
4235 IPW_DL_ASSOC,
4236 "deauthenticated: '%s' "
4237 MAC_FMT
4238 ": (0x%04X) - %s \n",
4239 escape_essid(priv->
4240 essid,
4241 priv->
4242 essid_len),
4243 MAC_ARG(priv->bssid),
4244 ntohs(auth->status),
4245 ipw_get_status_code
4246 (ntohs
4247 (auth->status)));
4248 #endif
4250 priv->status &=
4251 ~(STATUS_ASSOCIATING |
4252 STATUS_AUTH |
4253 STATUS_ASSOCIATED);
4255 schedule_work(&priv->link_down);
4256 break;
4259 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4260 IPW_DL_ASSOC,
4261 "authenticated: '%s' " MAC_FMT
4262 "\n",
4263 escape_essid(priv->essid,
4264 priv->essid_len),
4265 MAC_ARG(priv->bssid));
4266 break;
4269 case CMAS_INIT:{
4270 if (priv->status & STATUS_AUTH) {
4271 struct
4272 ieee80211_assoc_response
4273 *resp;
4274 resp =
4275 (struct
4276 ieee80211_assoc_response
4277 *)&notif->u.raw;
4278 IPW_DEBUG(IPW_DL_NOTIF |
4279 IPW_DL_STATE |
4280 IPW_DL_ASSOC,
4281 "association failed (0x%04X): %s\n",
4282 ntohs(resp->status),
4283 ipw_get_status_code
4284 (ntohs
4285 (resp->status)));
4288 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4289 IPW_DL_ASSOC,
4290 "disassociated: '%s' " MAC_FMT
4291 " \n",
4292 escape_essid(priv->essid,
4293 priv->essid_len),
4294 MAC_ARG(priv->bssid));
4296 priv->status &=
4297 ~(STATUS_DISASSOCIATING |
4298 STATUS_ASSOCIATING |
4299 STATUS_ASSOCIATED | STATUS_AUTH);
4300 if (priv->assoc_network
4301 && (priv->assoc_network->
4302 capability &
4303 WLAN_CAPABILITY_IBSS))
4304 ipw_remove_current_network
4305 (priv);
4307 schedule_work(&priv->link_down);
4309 break;
4312 case CMAS_RX_ASSOC_RESP:
4313 break;
4315 default:
4316 IPW_ERROR("assoc: unknown (%d)\n",
4317 assoc->state);
4318 break;
4321 break;
4324 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4325 struct notif_authenticate *auth = &notif->u.auth;
4326 switch (auth->state) {
4327 case CMAS_AUTHENTICATED:
4328 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4329 "authenticated: '%s' " MAC_FMT " \n",
4330 escape_essid(priv->essid,
4331 priv->essid_len),
4332 MAC_ARG(priv->bssid));
4333 priv->status |= STATUS_AUTH;
4334 break;
4336 case CMAS_INIT:
4337 if (priv->status & STATUS_AUTH) {
4338 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4339 IPW_DL_ASSOC,
4340 "authentication failed (0x%04X): %s\n",
4341 ntohs(auth->status),
4342 ipw_get_status_code(ntohs
4343 (auth->
4344 status)));
4346 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4347 IPW_DL_ASSOC,
4348 "deauthenticated: '%s' " MAC_FMT "\n",
4349 escape_essid(priv->essid,
4350 priv->essid_len),
4351 MAC_ARG(priv->bssid));
4353 priv->status &= ~(STATUS_ASSOCIATING |
4354 STATUS_AUTH |
4355 STATUS_ASSOCIATED);
4357 schedule_work(&priv->link_down);
4358 break;
4360 case CMAS_TX_AUTH_SEQ_1:
4361 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4362 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4363 break;
4364 case CMAS_RX_AUTH_SEQ_2:
4365 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4366 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4367 break;
4368 case CMAS_AUTH_SEQ_1_PASS:
4369 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4370 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4371 break;
4372 case CMAS_AUTH_SEQ_1_FAIL:
4373 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4374 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4375 break;
4376 case CMAS_TX_AUTH_SEQ_3:
4377 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4378 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4379 break;
4380 case CMAS_RX_AUTH_SEQ_4:
4381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4382 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4383 break;
4384 case CMAS_AUTH_SEQ_2_PASS:
4385 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4386 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4387 break;
4388 case CMAS_AUTH_SEQ_2_FAIL:
4389 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4390 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4391 break;
4392 case CMAS_TX_ASSOC:
4393 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4394 IPW_DL_ASSOC, "TX_ASSOC\n");
4395 break;
4396 case CMAS_RX_ASSOC_RESP:
4397 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4398 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4400 break;
4401 case CMAS_ASSOCIATED:
4402 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4403 IPW_DL_ASSOC, "ASSOCIATED\n");
4404 break;
4405 default:
4406 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4407 auth->state);
4408 break;
4410 break;
4413 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4414 struct notif_channel_result *x =
4415 &notif->u.channel_result;
4417 if (notif->size == sizeof(*x)) {
4418 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4419 x->channel_num);
4420 } else {
4421 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4422 "(should be %zd)\n",
4423 notif->size, sizeof(*x));
4425 break;
4428 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4429 struct notif_scan_complete *x = &notif->u.scan_complete;
4430 if (notif->size == sizeof(*x)) {
4431 IPW_DEBUG_SCAN
4432 ("Scan completed: type %d, %d channels, "
4433 "%d status\n", x->scan_type,
4434 x->num_channels, x->status);
4435 } else {
4436 IPW_ERROR("Scan completed of wrong size %d "
4437 "(should be %zd)\n",
4438 notif->size, sizeof(*x));
4441 priv->status &=
4442 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4444 wake_up_interruptible(&priv->wait_state);
4445 cancel_delayed_work(&priv->scan_check);
4447 if (priv->status & STATUS_EXIT_PENDING)
4448 break;
4450 priv->ieee->scans++;
4452 #ifdef CONFIG_IPW2200_MONITOR
4453 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4454 priv->status |= STATUS_SCAN_FORCED;
4455 queue_work(priv->workqueue,
4456 &priv->request_scan);
4457 break;
4459 priv->status &= ~STATUS_SCAN_FORCED;
4460 #endif /* CONFIG_IPW2200_MONITOR */
4462 if (!(priv->status & (STATUS_ASSOCIATED |
4463 STATUS_ASSOCIATING |
4464 STATUS_ROAMING |
4465 STATUS_DISASSOCIATING)))
4466 queue_work(priv->workqueue, &priv->associate);
4467 else if (priv->status & STATUS_ROAMING) {
4468 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4469 /* If a scan completed and we are in roam mode, then
4470 * the scan that completed was the one requested as a
4471 * result of entering roam... so, schedule the
4472 * roam work */
4473 queue_work(priv->workqueue,
4474 &priv->roam);
4475 else
4476 /* Don't schedule if we aborted the scan */
4477 priv->status &= ~STATUS_ROAMING;
4478 } else if (priv->status & STATUS_SCAN_PENDING)
4479 queue_work(priv->workqueue,
4480 &priv->request_scan);
4481 else if (priv->config & CFG_BACKGROUND_SCAN
4482 && priv->status & STATUS_ASSOCIATED)
4483 queue_delayed_work(priv->workqueue,
4484 &priv->request_scan, HZ);
4485 break;
4488 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4489 struct notif_frag_length *x = &notif->u.frag_len;
4491 if (notif->size == sizeof(*x))
4492 IPW_ERROR("Frag length: %d\n",
4493 le16_to_cpu(x->frag_length));
4494 else
4495 IPW_ERROR("Frag length of wrong size %d "
4496 "(should be %zd)\n",
4497 notif->size, sizeof(*x));
4498 break;
4501 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4502 struct notif_link_deterioration *x =
4503 &notif->u.link_deterioration;
4505 if (notif->size == sizeof(*x)) {
4506 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4507 "link deterioration: type %d, cnt %d\n",
4508 x->silence_notification_type,
4509 x->silence_count);
4510 memcpy(&priv->last_link_deterioration, x,
4511 sizeof(*x));
4512 } else {
4513 IPW_ERROR("Link Deterioration of wrong size %d "
4514 "(should be %zd)\n",
4515 notif->size, sizeof(*x));
4517 break;
4520 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4521 IPW_ERROR("Dino config\n");
4522 if (priv->hcmd
4523 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4524 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4526 break;
4529 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4530 struct notif_beacon_state *x = &notif->u.beacon_state;
4531 if (notif->size != sizeof(*x)) {
4532 IPW_ERROR
4533 ("Beacon state of wrong size %d (should "
4534 "be %zd)\n", notif->size, sizeof(*x));
4535 break;
4538 if (le32_to_cpu(x->state) ==
4539 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4540 ipw_handle_missed_beacon(priv,
4541 le32_to_cpu(x->
4542 number));
4544 break;
4547 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4548 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4549 if (notif->size == sizeof(*x)) {
4550 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4551 "0x%02x station %d\n",
4552 x->key_state, x->security_type,
4553 x->station_index);
4554 break;
4557 IPW_ERROR
4558 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4559 notif->size, sizeof(*x));
4560 break;
4563 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4564 struct notif_calibration *x = &notif->u.calibration;
4566 if (notif->size == sizeof(*x)) {
4567 memcpy(&priv->calib, x, sizeof(*x));
4568 IPW_DEBUG_INFO("TODO: Calibration\n");
4569 break;
4572 IPW_ERROR
4573 ("Calibration of wrong size %d (should be %zd)\n",
4574 notif->size, sizeof(*x));
4575 break;
4578 case HOST_NOTIFICATION_NOISE_STATS:{
4579 if (notif->size == sizeof(u32)) {
4580 priv->last_noise =
4581 (u8) (le32_to_cpu(notif->u.noise.value) &
4582 0xff);
4583 average_add(&priv->average_noise,
4584 priv->last_noise);
4585 break;
4588 IPW_ERROR
4589 ("Noise stat is wrong size %d (should be %zd)\n",
4590 notif->size, sizeof(u32));
4591 break;
4594 default:
4595 IPW_DEBUG_NOTIF("Unknown notification: "
4596 "subtype=%d,flags=0x%2x,size=%d\n",
4597 notif->subtype, notif->flags, notif->size);
4602 * Destroys all DMA structures and initialises them again
4604 * @param priv
4605 * @return error code
4607 static int ipw_queue_reset(struct ipw_priv *priv)
4609 int rc = 0;
4610 /** @todo customize queue sizes */
4611 int nTx = 64, nTxCmd = 8;
4612 ipw_tx_queue_free(priv);
4613 /* Tx CMD queue */
4614 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4615 IPW_TX_CMD_QUEUE_READ_INDEX,
4616 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4617 IPW_TX_CMD_QUEUE_BD_BASE,
4618 IPW_TX_CMD_QUEUE_BD_SIZE);
4619 if (rc) {
4620 IPW_ERROR("Tx Cmd queue init failed\n");
4621 goto error;
4623 /* Tx queue(s) */
4624 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4625 IPW_TX_QUEUE_0_READ_INDEX,
4626 IPW_TX_QUEUE_0_WRITE_INDEX,
4627 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4628 if (rc) {
4629 IPW_ERROR("Tx 0 queue init failed\n");
4630 goto error;
4632 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4633 IPW_TX_QUEUE_1_READ_INDEX,
4634 IPW_TX_QUEUE_1_WRITE_INDEX,
4635 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4636 if (rc) {
4637 IPW_ERROR("Tx 1 queue init failed\n");
4638 goto error;
4640 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4641 IPW_TX_QUEUE_2_READ_INDEX,
4642 IPW_TX_QUEUE_2_WRITE_INDEX,
4643 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4644 if (rc) {
4645 IPW_ERROR("Tx 2 queue init failed\n");
4646 goto error;
4648 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4649 IPW_TX_QUEUE_3_READ_INDEX,
4650 IPW_TX_QUEUE_3_WRITE_INDEX,
4651 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4652 if (rc) {
4653 IPW_ERROR("Tx 3 queue init failed\n");
4654 goto error;
4656 /* statistics */
4657 priv->rx_bufs_min = 0;
4658 priv->rx_pend_max = 0;
4659 return rc;
4661 error:
4662 ipw_tx_queue_free(priv);
4663 return rc;
4667 * Reclaim Tx queue entries no more used by NIC.
4669 * When the FW advances the 'R' index, all entries between the old and
4670 * new 'R' index need to be reclaimed. As a result, some free space
4671 * forms. If there is enough free space (> low mark), wake Tx queue.
4673 * @note Need to protect against garbage in 'R' index
4674 * @param priv
4675 * @param txq
4676 * @param qindex
4677 * @return Number of used entries remaining in the queue
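/* Illustrative sketch (not driver code) of the wrap-around arithmetic the
 * function below performs when counting entries still in use: */
#if 0
static int queue_used_entries(int first_empty, int last_used, int n_bd)
{
	int used = first_empty - last_used;

	/* e.g. n_bd = 64, last_used = 60, first_empty = 4:
	 * 4 - 60 = -56, so 64 is added and 8 entries remain in use. */
	return used < 0 ? used + n_bd : used;
}
#endif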
4679 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4680 struct clx2_tx_queue *txq, int qindex)
4682 u32 hw_tail;
4683 int used;
4684 struct clx2_queue *q = &txq->q;
4686 hw_tail = ipw_read32(priv, q->reg_r);
4687 if (hw_tail >= q->n_bd) {
4688 IPW_ERROR
4689 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4690 hw_tail, q->n_bd);
4691 goto done;
4693 for (; q->last_used != hw_tail;
4694 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4695 ipw_queue_tx_free_tfd(priv, txq);
4696 priv->tx_packets++;
4698 done:
4699 if ((ipw_queue_space(q) > q->low_mark) &&
4700 (qindex >= 0) &&
4701 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4702 netif_wake_queue(priv->net_dev);
4703 used = q->first_empty - q->last_used;
4704 if (used < 0)
4705 used += q->n_bd;
4707 return used;
4710 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4711 int len, int sync)
4713 struct clx2_tx_queue *txq = &priv->txq_cmd;
4714 struct clx2_queue *q = &txq->q;
4715 struct tfd_frame *tfd;
4717 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4718 IPW_ERROR("No space for Tx\n");
4719 return -EBUSY;
4722 tfd = &txq->bd[q->first_empty];
4723 txq->txb[q->first_empty] = NULL;
4725 memset(tfd, 0, sizeof(*tfd));
4726 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4727 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4728 priv->hcmd_seq++;
4729 tfd->u.cmd.index = hcmd;
4730 tfd->u.cmd.length = len;
4731 memcpy(tfd->u.cmd.payload, buf, len);
4732 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4733 ipw_write32(priv, q->reg_w, q->first_empty);
4734 _ipw_read32(priv, 0x90);
4736 return 0;
4740 * Rx theory of operation
4742 * The host allocates 32 DMA target addresses and passes the host address
4743 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4744 * 0 to 31
4746 * Rx Queue Indexes
4747 * The host/firmware share two index registers for managing the Rx buffers.
4749 * The READ index maps to the first position that the firmware may be writing
4750 * to -- the driver can read up to (but not including) this position and get
4751 * good data.
4752 * The READ index is managed by the firmware once the card is enabled.
4754 * The WRITE index maps to the last position the driver has read from -- the
4755 * position preceding WRITE is the last slot the firmware can place a packet in.
4757 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4758 * WRITE = READ.
4760 * During initialization the host sets up the READ queue position to the first
4761 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4763 * When the firmware places a packet in a buffer it will advance the READ index
4764 * and fire the RX interrupt. The driver can then query the READ index and
4765 * process as many packets as possible, moving the WRITE index forward as it
4766 * resets the Rx queue buffers with new memory.
4768 * The management in the driver is as follows:
4769 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4770 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4771 * to replenish the ipw->rxq->rx_free.
4772 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4773 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4774 * 'processed' and 'read' driver indexes as well)
4775 * + A received packet is processed and handed to the kernel network stack,
4776 * detached from the ipw->rxq. The driver 'processed' index is updated.
4777 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4778 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4779 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4780 * were enough free buffers and RX_STALLED is set it is cleared.
4783 * Driver sequence:
4785 * ipw_rx_queue_alloc() Allocates rx_free
4786 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4787 * ipw_rx_queue_restock
4788 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4789 * queue, updates firmware pointers, and updates
4790 * the WRITE index. If insufficient rx_free buffers
4791 * are available, schedules ipw_rx_queue_replenish
4793 * -- enable interrupts --
4794 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4795 * READ INDEX, detaching the SKB from the pool.
4796 * Moves the packet buffer from queue to rx_used.
4797 * Calls ipw_rx_queue_restock to refill any empty
4798 * slots.
4799 * ...
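/* Illustrative sketch (not driver code) of the index convention described
 * above: the ring is full when WRITE == READ, and empty -- no good data to
 * process -- when WRITE == READ - 1 modulo RX_QUEUE_SIZE. */
#if 0
static int rx_ring_full(u32 read, u32 write)
{
	return write == read;
}

static int rx_ring_empty(u32 read, u32 write)
{
	return write == (read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE;
}
#endif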
4804 * If there are slots in the RX queue that need to be restocked,
4805 * and we have free pre-allocated buffers, fill the ranks as much
4806 * as we can pulling from rx_free.
4808 * This moves the 'write' index forward to catch up with 'processed', and
4809 * also updates the memory address in the firmware to reference the new
4810 * target buffer.
4812 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4814 struct ipw_rx_queue *rxq = priv->rxq;
4815 struct list_head *element;
4816 struct ipw_rx_mem_buffer *rxb;
4817 unsigned long flags;
4818 int write;
4820 spin_lock_irqsave(&rxq->lock, flags);
4821 write = rxq->write;
4822 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4823 element = rxq->rx_free.next;
4824 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4825 list_del(element);
4827 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4828 rxb->dma_addr);
4829 rxq->queue[rxq->write] = rxb;
4830 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4831 rxq->free_count--;
4833 spin_unlock_irqrestore(&rxq->lock, flags);
4835 /* If the pre-allocated buffer pool is dropping low, schedule to
4836 * refill it */
4837 if (rxq->free_count <= RX_LOW_WATERMARK)
4838 queue_work(priv->workqueue, &priv->rx_replenish);
4840 /* If we've added more space for the firmware to place data, tell it */
4841 if (write != rxq->write)
4842 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4846 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
4847 * Also restock the Rx queue via ipw_rx_queue_restock.
4849 * This is called as a scheduled work item (except during initialization)
4851 static void ipw_rx_queue_replenish(void *data)
4853 struct ipw_priv *priv = data;
4854 struct ipw_rx_queue *rxq = priv->rxq;
4855 struct list_head *element;
4856 struct ipw_rx_mem_buffer *rxb;
4857 unsigned long flags;
4859 spin_lock_irqsave(&rxq->lock, flags);
4860 while (!list_empty(&rxq->rx_used)) {
4861 element = rxq->rx_used.next;
4862 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4863 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4864 if (!rxb->skb) {
4865 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4866 priv->net_dev->name);
4867 /* We don't reschedule replenish work here -- we will
4868 * call the restock method and if it still needs
4869 * more buffers it will schedule replenish */
4870 break;
4872 list_del(element);
4874 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4875 rxb->dma_addr =
4876 pci_map_single(priv->pci_dev, rxb->skb->data,
4877 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4879 list_add_tail(&rxb->list, &rxq->rx_free);
4880 rxq->free_count++;
4882 spin_unlock_irqrestore(&rxq->lock, flags);
4884 ipw_rx_queue_restock(priv);
4887 static void ipw_bg_rx_queue_replenish(void *data)
4889 struct ipw_priv *priv = data;
4890 mutex_lock(&priv->mutex);
4891 ipw_rx_queue_replenish(data);
4892 mutex_unlock(&priv->mutex);
4895 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4896 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4897 * This free routine walks the list of POOL entries and, if the SKB is
4898 * non-NULL, unmaps and frees it.
4900 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4902 int i;
4904 if (!rxq)
4905 return;
4907 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4908 if (rxq->pool[i].skb != NULL) {
4909 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4910 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4911 dev_kfree_skb(rxq->pool[i].skb);
4915 kfree(rxq);
4918 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4920 struct ipw_rx_queue *rxq;
4921 int i;
4923 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4924 if (unlikely(!rxq)) {
4925 IPW_ERROR("memory allocation failed\n");
4926 return NULL;
4928 spin_lock_init(&rxq->lock);
4929 INIT_LIST_HEAD(&rxq->rx_free);
4930 INIT_LIST_HEAD(&rxq->rx_used);
4932 /* Fill the rx_used queue with _all_ of the Rx buffers */
4933 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4934 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4936 /* Set us so that we have processed and used all buffers, but have
4937 * not restocked the Rx queue with fresh buffers */
4938 rxq->read = rxq->write = 0;
4939 rxq->processed = RX_QUEUE_SIZE - 1;
4940 rxq->free_count = 0;
4942 return rxq;
4945 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4947 rate &= ~IEEE80211_BASIC_RATE_MASK;
4948 if (ieee_mode == IEEE_A) {
4949 switch (rate) {
4950 case IEEE80211_OFDM_RATE_6MB:
4951 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4952 1 : 0;
4953 case IEEE80211_OFDM_RATE_9MB:
4954 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4955 1 : 0;
4956 case IEEE80211_OFDM_RATE_12MB:
4957 return priv->
4958 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4959 case IEEE80211_OFDM_RATE_18MB:
4960 return priv->
4961 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4962 case IEEE80211_OFDM_RATE_24MB:
4963 return priv->
4964 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4965 case IEEE80211_OFDM_RATE_36MB:
4966 return priv->
4967 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4968 case IEEE80211_OFDM_RATE_48MB:
4969 return priv->
4970 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4971 case IEEE80211_OFDM_RATE_54MB:
4972 return priv->
4973 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4974 default:
4975 return 0;
4979 /* B and G mixed */
4980 switch (rate) {
4981 case IEEE80211_CCK_RATE_1MB:
4982 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4983 case IEEE80211_CCK_RATE_2MB:
4984 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4985 case IEEE80211_CCK_RATE_5MB:
4986 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4987 case IEEE80211_CCK_RATE_11MB:
4988 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4991 /* If we are limited to B modulations, bail at this point */
4992 if (ieee_mode == IEEE_B)
4993 return 0;
4995 /* G */
4996 switch (rate) {
4997 case IEEE80211_OFDM_RATE_6MB:
4998 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4999 case IEEE80211_OFDM_RATE_9MB:
5000 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5001 case IEEE80211_OFDM_RATE_12MB:
5002 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5003 case IEEE80211_OFDM_RATE_18MB:
5004 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5005 case IEEE80211_OFDM_RATE_24MB:
5006 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5007 case IEEE80211_OFDM_RATE_36MB:
5008 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5009 case IEEE80211_OFDM_RATE_48MB:
5010 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5011 case IEEE80211_OFDM_RATE_54MB:
5012 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5015 return 0;
5018 static int ipw_compatible_rates(struct ipw_priv *priv,
5019 const struct ieee80211_network *network,
5020 struct ipw_supported_rates *rates)
5022 int num_rates, i;
5024 memset(rates, 0, sizeof(*rates));
5025 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5026 rates->num_rates = 0;
5027 for (i = 0; i < num_rates; i++) {
5028 if (!ipw_is_rate_in_mask(priv, network->mode,
5029 network->rates[i])) {
5031 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5032 IPW_DEBUG_SCAN("Adding masked mandatory "
5033 "rate %02X\n",
5034 network->rates[i]);
5035 rates->supported_rates[rates->num_rates++] =
5036 network->rates[i];
5037 continue;
5040 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5041 network->rates[i], priv->rates_mask);
5042 continue;
5045 rates->supported_rates[rates->num_rates++] = network->rates[i];
5048 num_rates = min(network->rates_ex_len,
5049 (u8) (IPW_MAX_RATES - num_rates));
5050 for (i = 0; i < num_rates; i++) {
5051 if (!ipw_is_rate_in_mask(priv, network->mode,
5052 network->rates_ex[i])) {
5053 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5054 IPW_DEBUG_SCAN("Adding masked mandatory "
5055 "rate %02X\n",
5056 network->rates_ex[i]);
5057 rates->supported_rates[rates->num_rates++] =
5058 network->rates[i];
5059 continue;
5062 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5063 network->rates_ex[i], priv->rates_mask);
5064 continue;
5067 rates->supported_rates[rates->num_rates++] =
5068 network->rates_ex[i];
5071 return 1;
5074 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5075 const struct ipw_supported_rates *src)
5077 u8 i;
5078 for (i = 0; i < src->num_rates; i++)
5079 dest->supported_rates[i] = src->supported_rates[i];
5080 dest->num_rates = src->num_rates;
5083 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5084 * mask should ever be used -- right now all callers to add the scan rates are
5085 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5086 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5087 u8 modulation, u32 rate_mask)
5089 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5090 IEEE80211_BASIC_RATE_MASK : 0;
5092 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5093 rates->supported_rates[rates->num_rates++] =
5094 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5096 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5097 rates->supported_rates[rates->num_rates++] =
5098 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5100 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5101 rates->supported_rates[rates->num_rates++] = basic_mask |
5102 IEEE80211_CCK_RATE_5MB;
5104 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5105 rates->supported_rates[rates->num_rates++] = basic_mask |
5106 IEEE80211_CCK_RATE_11MB;
5109 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5110 u8 modulation, u32 rate_mask)
5112 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5113 IEEE80211_BASIC_RATE_MASK : 0;
5115 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5116 rates->supported_rates[rates->num_rates++] = basic_mask |
5117 IEEE80211_OFDM_RATE_6MB;
5119 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5120 rates->supported_rates[rates->num_rates++] =
5121 IEEE80211_OFDM_RATE_9MB;
5123 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5124 rates->supported_rates[rates->num_rates++] = basic_mask |
5125 IEEE80211_OFDM_RATE_12MB;
5127 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5128 rates->supported_rates[rates->num_rates++] =
5129 IEEE80211_OFDM_RATE_18MB;
5131 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5132 rates->supported_rates[rates->num_rates++] = basic_mask |
5133 IEEE80211_OFDM_RATE_24MB;
5135 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5136 rates->supported_rates[rates->num_rates++] =
5137 IEEE80211_OFDM_RATE_36MB;
5139 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5140 rates->supported_rates[rates->num_rates++] =
5141 IEEE80211_OFDM_RATE_48MB;
5143 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5144 rates->supported_rates[rates->num_rates++] =
5145 IEEE80211_OFDM_RATE_54MB;
5148 struct ipw_network_match {
5149 struct ieee80211_network *network;
5150 struct ipw_supported_rates rates;
5153 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5154 struct ipw_network_match *match,
5155 struct ieee80211_network *network,
5156 int roaming)
5158 struct ipw_supported_rates rates;
5160 /* Verify that this network's capability is compatible with the
5161 * current mode (AdHoc or Infrastructure) */
5162 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5163 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5164 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5165 "capability mismatch.\n",
5166 escape_essid(network->ssid, network->ssid_len),
5167 MAC_ARG(network->bssid));
5168 return 0;
5171 /* If we do not have an ESSID for this AP, we can not associate with
5172 * it */
5173 if (network->flags & NETWORK_EMPTY_ESSID) {
5174 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5175 "because of hidden ESSID.\n",
5176 escape_essid(network->ssid, network->ssid_len),
5177 MAC_ARG(network->bssid));
5178 return 0;
5181 if (unlikely(roaming)) {
5182 /* If we are roaming, then check whether this is a valid
5183 * network to try to roam to */
5184 if ((network->ssid_len != match->network->ssid_len) ||
5185 memcmp(network->ssid, match->network->ssid,
5186 network->ssid_len)) {
5187 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5188 "because of non-network ESSID.\n",
5189 escape_essid(network->ssid,
5190 network->ssid_len),
5191 MAC_ARG(network->bssid));
5192 return 0;
5194 } else {
5195 /* If an ESSID has been configured then compare the broadcast
5196 * ESSID to ours */
5197 if ((priv->config & CFG_STATIC_ESSID) &&
5198 ((network->ssid_len != priv->essid_len) ||
5199 memcmp(network->ssid, priv->essid,
5200 min(network->ssid_len, priv->essid_len)))) {
5201 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5203 strncpy(escaped,
5204 escape_essid(network->ssid, network->ssid_len),
5205 sizeof(escaped));
5206 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5207 "because of ESSID mismatch: '%s'.\n",
5208 escaped, MAC_ARG(network->bssid),
5209 escape_essid(priv->essid,
5210 priv->essid_len));
5211 return 0;
5215 /* If the old network rate is better than this one, don't bother
5216 * testing everything else. */
5218 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5219 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5220 "current network.\n",
5221 escape_essid(match->network->ssid,
5222 match->network->ssid_len));
5223 return 0;
5224 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5225 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5226 "current network.\n",
5227 escape_essid(match->network->ssid,
5228 match->network->ssid_len));
5229 return 0;
5232 /* Now go through and see if the requested network is valid... */
5233 if (priv->ieee->scan_age != 0 &&
5234 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5235 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5236 "because of age: %ums.\n",
5237 escape_essid(network->ssid, network->ssid_len),
5238 MAC_ARG(network->bssid),
5239 jiffies_to_msecs(jiffies -
5240 network->last_scanned));
5241 return 0;
5244 if ((priv->config & CFG_STATIC_CHANNEL) &&
5245 (network->channel != priv->channel)) {
5246 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5247 "because of channel mismatch: %d != %d.\n",
5248 escape_essid(network->ssid, network->ssid_len),
5249 MAC_ARG(network->bssid),
5250 network->channel, priv->channel);
5251 return 0;
5254 /* Verify privacy compatibility */
5255 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5256 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5257 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5258 "because of privacy mismatch: %s != %s.\n",
5259 escape_essid(network->ssid, network->ssid_len),
5260 MAC_ARG(network->bssid),
5261 priv->
5262 capability & CAP_PRIVACY_ON ? "on" : "off",
5263 network->
5264 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5265 "off");
5266 return 0;
5269 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5270 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5271 "because of the same BSSID match: " MAC_FMT
5272 ".\n", escape_essid(network->ssid,
5273 network->ssid_len),
5274 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5275 return 0;
5278 /* Filter out any incompatible freq / mode combinations */
5279 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5280 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5281 "because of invalid frequency/mode "
5282 "combination.\n",
5283 escape_essid(network->ssid, network->ssid_len),
5284 MAC_ARG(network->bssid));
5285 return 0;
5288 /* Ensure that the rates supported by the driver are compatible with
5289 * this AP, including verification of basic rates (mandatory) */
5290 if (!ipw_compatible_rates(priv, network, &rates)) {
5291 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5292 "because configured rate mask excludes "
5293 "AP mandatory rate.\n",
5294 escape_essid(network->ssid, network->ssid_len),
5295 MAC_ARG(network->bssid));
5296 return 0;
5299 if (rates.num_rates == 0) {
5300 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5301 "because of no compatible rates.\n",
5302 escape_essid(network->ssid, network->ssid_len),
5303 MAC_ARG(network->bssid));
5304 return 0;
5307 /* TODO: Perform any further minimal comparative tests. We do not
5308 * want to put too much policy logic here; intelligent scan selection
5309 * should occur within a generic IEEE 802.11 user space tool. */
5311 /* Set up 'new' AP to this network */
5312 ipw_copy_rates(&match->rates, &rates);
5313 match->network = network;
5314 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5315 escape_essid(network->ssid, network->ssid_len),
5316 MAC_ARG(network->bssid));
5318 return 1;
5321 static void ipw_merge_adhoc_network(void *data)
5323 struct ipw_priv *priv = data;
5324 struct ieee80211_network *network = NULL;
5325 struct ipw_network_match match = {
5326 .network = priv->assoc_network
5329 if ((priv->status & STATUS_ASSOCIATED) &&
5330 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5331 /* First pass through ROAM process -- look for a better
5332 * network */
5333 unsigned long flags;
5335 spin_lock_irqsave(&priv->ieee->lock, flags);
5336 list_for_each_entry(network, &priv->ieee->network_list, list) {
5337 if (network != priv->assoc_network)
5338 ipw_find_adhoc_network(priv, &match, network,
5341 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5343 if (match.network == priv->assoc_network) {
5344 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5345 "merge to.\n");
5346 return;
5349 mutex_lock(&priv->mutex);
5350 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5351 IPW_DEBUG_MERGE("remove network %s\n",
5352 escape_essid(priv->essid,
5353 priv->essid_len));
5354 ipw_remove_current_network(priv);
5357 ipw_disassociate(priv);
5358 priv->assoc_network = match.network;
5359 mutex_unlock(&priv->mutex);
5360 return;
5364 static int ipw_best_network(struct ipw_priv *priv,
5365 struct ipw_network_match *match,
5366 struct ieee80211_network *network, int roaming)
5368 struct ipw_supported_rates rates;
5370 /* Verify that this network's capability is compatible with the
5371 * current mode (AdHoc or Infrastructure) */
5372 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5373 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5374 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5375 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5376 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5377 "capability mismatch.\n",
5378 escape_essid(network->ssid, network->ssid_len),
5379 MAC_ARG(network->bssid));
5380 return 0;
5383 /* If we do not have an ESSID for this AP, we can not associate with
5384 * it */
5385 if (network->flags & NETWORK_EMPTY_ESSID) {
5386 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5387 "because of hidden ESSID.\n",
5388 escape_essid(network->ssid, network->ssid_len),
5389 MAC_ARG(network->bssid));
5390 return 0;
5393 if (unlikely(roaming)) {
5394 /* If we are roaming, then check whether this is a valid
5395 * network to try to roam to */
5396 if ((network->ssid_len != match->network->ssid_len) ||
5397 memcmp(network->ssid, match->network->ssid,
5398 network->ssid_len)) {
5399 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5400 "because of non-network ESSID.\n",
5401 escape_essid(network->ssid,
5402 network->ssid_len),
5403 MAC_ARG(network->bssid));
5404 return 0;
5406 } else {
5407 /* If an ESSID has been configured then compare the broadcast
5408 * ESSID to ours */
5409 if ((priv->config & CFG_STATIC_ESSID) &&
5410 ((network->ssid_len != priv->essid_len) ||
5411 memcmp(network->ssid, priv->essid,
5412 min(network->ssid_len, priv->essid_len)))) {
5413 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5414 strncpy(escaped,
5415 escape_essid(network->ssid, network->ssid_len),
5416 sizeof(escaped));
5417 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5418 "because of ESSID mismatch: '%s'.\n",
5419 escaped, MAC_ARG(network->bssid),
5420 escape_essid(priv->essid,
5421 priv->essid_len));
5422 return 0;
5426 /* If the old network rate is better than this one, don't bother
5427 * testing everything else. */
5428 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5429 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5430 strncpy(escaped,
5431 escape_essid(network->ssid, network->ssid_len),
5432 sizeof(escaped));
5433 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5434 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5435 escaped, MAC_ARG(network->bssid),
5436 escape_essid(match->network->ssid,
5437 match->network->ssid_len),
5438 MAC_ARG(match->network->bssid));
5439 return 0;
5442 /* If this network has already had an association attempt within the
5443 * last 3 seconds, do not try and associate again... */
5444 if (network->last_associate &&
5445 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5446 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5447 "because of storming (%ums since last "
5448 "assoc attempt).\n",
5449 escape_essid(network->ssid, network->ssid_len),
5450 MAC_ARG(network->bssid),
5451 jiffies_to_msecs(jiffies -
5452 network->last_associate));
5453 return 0;
5456 /* Now go through and see if the requested network is valid... */
5457 if (priv->ieee->scan_age != 0 &&
5458 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5459 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5460 "because of age: %ums.\n",
5461 escape_essid(network->ssid, network->ssid_len),
5462 MAC_ARG(network->bssid),
5463 jiffies_to_msecs(jiffies -
5464 network->last_scanned));
5465 return 0;
5468 if ((priv->config & CFG_STATIC_CHANNEL) &&
5469 (network->channel != priv->channel)) {
5470 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5471 "because of channel mismatch: %d != %d.\n",
5472 escape_essid(network->ssid, network->ssid_len),
5473 MAC_ARG(network->bssid),
5474 network->channel, priv->channel);
5475 return 0;
5478 /* Verify privacy compatibility */
5479 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5480 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5481 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5482 "because of privacy mismatch: %s != %s.\n",
5483 escape_essid(network->ssid, network->ssid_len),
5484 MAC_ARG(network->bssid),
5485 priv->capability & CAP_PRIVACY_ON ? "on" :
5486 "off",
5487 network->capability &
5488 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5489 return 0;
5492 if ((priv->config & CFG_STATIC_BSSID) &&
5493 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5494 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5495 "because of BSSID mismatch: " MAC_FMT ".\n",
5496 escape_essid(network->ssid, network->ssid_len),
5497 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5498 return 0;
5501 /* Filter out any incompatible freq / mode combinations */
5502 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5503 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5504 "because of invalid frequency/mode "
5505 "combination.\n",
5506 escape_essid(network->ssid, network->ssid_len),
5507 MAC_ARG(network->bssid));
5508 return 0;
5511 /* Filter out invalid channel in current GEO */
5512 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5513 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5514 "because of invalid channel in current GEO\n",
5515 escape_essid(network->ssid, network->ssid_len),
5516 MAC_ARG(network->bssid));
5517 return 0;
5520 /* Ensure that the rates supported by the driver are compatible with
5521 * this AP, including verification of basic rates (mandatory) */
5522 if (!ipw_compatible_rates(priv, network, &rates)) {
5523 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5524 "because configured rate mask excludes "
5525 "AP mandatory rate.\n",
5526 escape_essid(network->ssid, network->ssid_len),
5527 MAC_ARG(network->bssid));
5528 return 0;
5531 if (rates.num_rates == 0) {
5532 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5533 "because of no compatible rates.\n",
5534 escape_essid(network->ssid, network->ssid_len),
5535 MAC_ARG(network->bssid));
5536 return 0;
5539 /* TODO: Perform any further minimal comparative tests. We do not
5540 * want to put too much policy logic here; intelligent scan selection
5541 * should occur within a generic IEEE 802.11 user space tool. */
5543 /* Set up 'new' AP to this network */
5544 ipw_copy_rates(&match->rates, &rates);
5545 match->network = network;
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5548 escape_essid(network->ssid, network->ssid_len),
5549 MAC_ARG(network->bssid));
5551 return 1;
5554 static void ipw_adhoc_create(struct ipw_priv *priv,
5555 struct ieee80211_network *network)
5557 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5558 int i;
5561 * For the purposes of scanning, we can set our wireless mode
5562 * to trigger scans across combinations of bands, but when it
5563 * comes to creating a new ad-hoc network, we have to tell the FW
5564 * exactly which band to use.
5566 * We also have the possibility of an invalid channel for the
5567 * chosen band. Attempting to create a new ad-hoc network
5568 * with an invalid channel for the wireless mode will trigger a
5569 * FW fatal error.
5572 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5573 case IEEE80211_52GHZ_BAND:
5574 network->mode = IEEE_A;
5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5576 if (i == -1)
5577 BUG();
5578 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5579 IPW_WARNING("Overriding invalid channel\n");
5580 priv->channel = geo->a[0].channel;
5582 break;
5584 case IEEE80211_24GHZ_BAND:
5585 if (priv->ieee->mode & IEEE_G)
5586 network->mode = IEEE_G;
5587 else
5588 network->mode = IEEE_B;
5589 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5590 if (i == -1)
5591 BUG();
5592 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5593 IPW_WARNING("Overriding invalid channel\n");
5594 priv->channel = geo->bg[0].channel;
5596 break;
5598 default:
5599 IPW_WARNING("Overriding invalid channel\n");
5600 if (priv->ieee->mode & IEEE_A) {
5601 network->mode = IEEE_A;
5602 priv->channel = geo->a[0].channel;
5603 } else if (priv->ieee->mode & IEEE_G) {
5604 network->mode = IEEE_G;
5605 priv->channel = geo->bg[0].channel;
5606 } else {
5607 network->mode = IEEE_B;
5608 priv->channel = geo->bg[0].channel;
5610 break;
5613 network->channel = priv->channel;
5614 priv->config |= CFG_ADHOC_PERSIST;
5615 ipw_create_bssid(priv, network->bssid);
5616 network->ssid_len = priv->essid_len;
5617 memcpy(network->ssid, priv->essid, priv->essid_len);
5618 memset(&network->stats, 0, sizeof(network->stats));
5619 network->capability = WLAN_CAPABILITY_IBSS;
5620 if (!(priv->config & CFG_PREAMBLE_LONG))
5621 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5622 if (priv->capability & CAP_PRIVACY_ON)
5623 network->capability |= WLAN_CAPABILITY_PRIVACY;
5624 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5625 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5626 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5627 memcpy(network->rates_ex,
5628 &priv->rates.supported_rates[network->rates_len],
5629 network->rates_ex_len);
5630 network->last_scanned = 0;
5631 network->flags = 0;
5632 network->last_associate = 0;
5633 network->time_stamp[0] = 0;
5634 network->time_stamp[1] = 0;
5635 network->beacon_interval = 100; /* Default */
5636 network->listen_interval = 10; /* Default */
5637 network->atim_window = 0; /* Default */
5638 network->wpa_ie_len = 0;
5639 network->rsn_ie_len = 0;
5642 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5644 struct ipw_tgi_tx_key key;
5646 if (!(priv->ieee->sec.flags & (1 << index)))
5647 return;
5649 key.key_id = index;
5650 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5651 key.security_type = type;
5652 key.station_index = 0; /* always 0 for BSS */
5653 key.flags = 0;
5654 /* 0 for new key; previous value of counter (after fatal error) */
5655 key.tx_counter[0] = 0;
5656 key.tx_counter[1] = 0;
5658 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5661 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5663 struct ipw_wep_key key;
5664 int i;
5666 key.cmd_id = DINO_CMD_WEP_KEY;
5667 key.seq_num = 0;
5669 /* Note: AES keys cannot be set multiple times.
5670 * Only set them the first time. */
5671 for (i = 0; i < 4; i++) {
5672 key.key_index = i | type;
5673 if (!(priv->ieee->sec.flags & (1 << i))) {
5674 key.key_size = 0;
5675 continue;
5678 key.key_size = priv->ieee->sec.key_sizes[i];
5679 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5681 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5685 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5687 if (priv->ieee->host_encrypt)
5688 return;
5690 switch (level) {
5691 case SEC_LEVEL_3:
5692 priv->sys_config.disable_unicast_decryption = 0;
5693 priv->ieee->host_decrypt = 0;
5694 break;
5695 case SEC_LEVEL_2:
5696 priv->sys_config.disable_unicast_decryption = 1;
5697 priv->ieee->host_decrypt = 1;
5698 break;
5699 case SEC_LEVEL_1:
5700 priv->sys_config.disable_unicast_decryption = 0;
5701 priv->ieee->host_decrypt = 0;
5702 break;
5703 case SEC_LEVEL_0:
5704 priv->sys_config.disable_unicast_decryption = 1;
5705 break;
5706 default:
5707 break;
5711 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5713 if (priv->ieee->host_encrypt)
5714 return;
5716 switch (level) {
5717 case SEC_LEVEL_3:
5718 priv->sys_config.disable_multicast_decryption = 0;
5719 break;
5720 case SEC_LEVEL_2:
5721 priv->sys_config.disable_multicast_decryption = 1;
5722 break;
5723 case SEC_LEVEL_1:
5724 priv->sys_config.disable_multicast_decryption = 0;
5725 break;
5726 case SEC_LEVEL_0:
5727 priv->sys_config.disable_multicast_decryption = 1;
5728 break;
5729 default:
5730 break;
5734 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5736 switch (priv->ieee->sec.level) {
5737 case SEC_LEVEL_3:
5738 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5739 ipw_send_tgi_tx_key(priv,
5740 DCT_FLAG_EXT_SECURITY_CCM,
5741 priv->ieee->sec.active_key);
5743 if (!priv->ieee->host_mc_decrypt)
5744 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5745 break;
5746 case SEC_LEVEL_2:
5747 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5748 ipw_send_tgi_tx_key(priv,
5749 DCT_FLAG_EXT_SECURITY_TKIP,
5750 priv->ieee->sec.active_key);
5751 break;
5752 case SEC_LEVEL_1:
5753 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5754 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5755 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5756 break;
5757 case SEC_LEVEL_0:
5758 default:
5759 break;
5763 static void ipw_adhoc_check(void *data)
5765 struct ipw_priv *priv = data;
5767 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5768 !(priv->config & CFG_ADHOC_PERSIST)) {
5769 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5770 IPW_DL_STATE | IPW_DL_ASSOC,
5771 "Missed beacon: %d - disassociate\n",
5772 priv->missed_adhoc_beacons);
5773 ipw_remove_current_network(priv);
5774 ipw_disassociate(priv);
5775 return;
5778 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5779 priv->assoc_request.beacon_interval);
5782 static void ipw_bg_adhoc_check(void *data)
5784 struct ipw_priv *priv = data;
5785 mutex_lock(&priv->mutex);
5786 ipw_adhoc_check(data);
5787 mutex_unlock(&priv->mutex);
5790 #ifdef CONFIG_IPW2200_DEBUG
5791 static void ipw_debug_config(struct ipw_priv *priv)
5793 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5794 "[CFG 0x%08X]\n", priv->config);
5795 if (priv->config & CFG_STATIC_CHANNEL)
5796 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5797 else
5798 IPW_DEBUG_INFO("Channel unlocked.\n");
5799 if (priv->config & CFG_STATIC_ESSID)
5800 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5801 escape_essid(priv->essid, priv->essid_len));
5802 else
5803 IPW_DEBUG_INFO("ESSID unlocked.\n");
5804 if (priv->config & CFG_STATIC_BSSID)
5805 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5806 MAC_ARG(priv->bssid));
5807 else
5808 IPW_DEBUG_INFO("BSSID unlocked.\n");
5809 if (priv->capability & CAP_PRIVACY_ON)
5810 IPW_DEBUG_INFO("PRIVACY on\n");
5811 else
5812 IPW_DEBUG_INFO("PRIVACY off\n");
5813 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5815 #else
5816 #define ipw_debug_config(x) do {} while (0)
5817 #endif
5819 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5821 /* TODO: Verify that this works... */
5822 struct ipw_fixed_rate fr = {
5823 .tx_rates = priv->rates_mask
5825 u32 reg;
5826 u16 mask = 0;
5828 /* Identify 'current FW band' and match it with the fixed
5829 * Tx rates */
5831 switch (priv->ieee->freq_band) {
5832 case IEEE80211_52GHZ_BAND: /* A only */
5833 /* IEEE_A */
5834 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5835 /* Invalid fixed rate mask */
5836 IPW_DEBUG_WX
5837 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5838 fr.tx_rates = 0;
5839 break;
5842 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5843 break;
5845 default: /* 2.4Ghz or Mixed */
5846 /* IEEE_B */
5847 if (mode == IEEE_B) {
5848 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5849 /* Invalid fixed rate mask */
5850 IPW_DEBUG_WX
5851 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5852 fr.tx_rates = 0;
5854 break;
5857 /* IEEE_G */
5858 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5859 IEEE80211_OFDM_RATES_MASK)) {
5860 /* Invalid fixed rate mask */
5861 IPW_DEBUG_WX
5862 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5863 fr.tx_rates = 0;
5864 break;
5867 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5868 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5869 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5872 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5873 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5874 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5877 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5878 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5879 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5882 fr.tx_rates |= mask;
5883 break;
5886 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5887 ipw_write_reg32(priv, reg, *(u32 *) & fr);
5890 static void ipw_abort_scan(struct ipw_priv *priv)
5892 int err;
5894 if (priv->status & STATUS_SCAN_ABORTING) {
5895 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5896 return;
5898 priv->status |= STATUS_SCAN_ABORTING;
5900 err = ipw_send_scan_abort(priv);
5901 if (err)
5902 IPW_DEBUG_HC("Request to abort scan failed.\n");
5905 static void ipw_add_scan_channels(struct ipw_priv *priv,
5906 struct ipw_scan_request_ext *scan,
5907 int scan_type)
5909 int channel_index = 0;
5910 const struct ieee80211_geo *geo;
5911 int i;
5913 geo = ieee80211_get_geo(priv->ieee);
5915 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5916 int start = channel_index;
5917 for (i = 0; i < geo->a_channels; i++) {
5918 if ((priv->status & STATUS_ASSOCIATED) &&
5919 geo->a[i].channel == priv->channel)
5920 continue;
5921 channel_index++;
5922 scan->channels_list[channel_index] = geo->a[i].channel;
5923 ipw_set_scan_type(scan, channel_index,
5924 geo->a[i].
5925 flags & IEEE80211_CH_PASSIVE_ONLY ?
5926 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
5927 scan_type);
5930 if (start != channel_index) {
5931 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
5932 (channel_index - start);
5933 channel_index++;
5937 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5938 int start = channel_index;
5939 if (priv->config & CFG_SPEED_SCAN) {
5940 int index;
5941 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
5942 /* nop out the list */
5943 [0] = 0
5946 u8 channel;
5947 while (channel_index < IPW_SCAN_CHANNELS) {
5948 channel =
5949 priv->speed_scan[priv->speed_scan_pos];
5950 if (channel == 0) {
5951 priv->speed_scan_pos = 0;
5952 channel = priv->speed_scan[0];
5954 if ((priv->status & STATUS_ASSOCIATED) &&
5955 channel == priv->channel) {
5956 priv->speed_scan_pos++;
5957 continue;
5960 /* If this channel has already been
5961 * added to the scan, break from the
5962 * loop; it will be the first channel
5963 * in the next scan.
5965 if (channels[channel - 1] != 0)
5966 break;
5968 channels[channel - 1] = 1;
5969 priv->speed_scan_pos++;
5970 channel_index++;
5971 scan->channels_list[channel_index] = channel;
5972 index =
5973 ieee80211_channel_to_index(priv->ieee, channel);
5974 ipw_set_scan_type(scan, channel_index,
5975 geo->bg[index].
5976 flags &
5977 IEEE80211_CH_PASSIVE_ONLY ?
5978 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
5979 : scan_type);
5981 } else {
5982 for (i = 0; i < geo->bg_channels; i++) {
5983 if ((priv->status & STATUS_ASSOCIATED) &&
5984 geo->bg[i].channel == priv->channel)
5985 continue;
5986 channel_index++;
5987 scan->channels_list[channel_index] =
5988 geo->bg[i].channel;
5989 ipw_set_scan_type(scan, channel_index,
5990 geo->bg[i].
5991 flags &
5992 IEEE80211_CH_PASSIVE_ONLY ?
5993 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
5994 : scan_type);
5998 if (start != channel_index) {
5999 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6000 (channel_index - start);
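/*
 * Editorial sketch (not part of the driver): the channels_list built above is
 * a sequence of band blocks.  Each block starts with a header byte carrying
 * the band mode in the top two bits ((IPW_A_MODE or IPW_B_MODE) << 6) and the
 * channel count in the low six bits, followed by that many channel numbers.
 * The hypothetical helper below only illustrates that packing; it is derived
 * from, not used by, the code above.
 */
static inline int ipw_sketch_pack_band_block(u8 *list, int pos, u8 band_mode,
					     const u8 *channels, u8 count)
{
	int i;

	list[pos] = (u8) ((band_mode << 6) | (count & 0x3f));
	for (i = 0; i < count; i++)
		list[pos + 1 + i] = channels[i];

	return pos + 1 + count;	/* index of the next free slot */
}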
6005 static int ipw_request_scan(struct ipw_priv *priv)
6007 struct ipw_scan_request_ext scan;
6008 int err = 0, scan_type;
6010 if (!(priv->status & STATUS_INIT) ||
6011 (priv->status & STATUS_EXIT_PENDING))
6012 return 0;
6014 mutex_lock(&priv->mutex);
6016 if (priv->status & STATUS_SCANNING) {
6017 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6018 priv->status |= STATUS_SCAN_PENDING;
6019 goto done;
6022 if (!(priv->status & STATUS_SCAN_FORCED) &&
6023 priv->status & STATUS_SCAN_ABORTING) {
6024 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6025 priv->status |= STATUS_SCAN_PENDING;
6026 goto done;
6029 if (priv->status & STATUS_RF_KILL_MASK) {
6030 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6031 priv->status |= STATUS_SCAN_PENDING;
6032 goto done;
6035 memset(&scan, 0, sizeof(scan));
6037 if (priv->config & CFG_SPEED_SCAN)
6038 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6039 cpu_to_le16(30);
6040 else
6041 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6042 cpu_to_le16(20);
6044 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6045 cpu_to_le16(20);
6046 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6048 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6050 #ifdef CONFIG_IPW2200_MONITOR
6051 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6052 u8 channel;
6053 u8 band = 0;
6055 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6056 case IEEE80211_52GHZ_BAND:
6057 band = (u8) (IPW_A_MODE << 6) | 1;
6058 channel = priv->channel;
6059 break;
6061 case IEEE80211_24GHZ_BAND:
6062 band = (u8) (IPW_B_MODE << 6) | 1;
6063 channel = priv->channel;
6064 break;
6066 default:
6067 band = (u8) (IPW_B_MODE << 6) | 1;
6068 channel = 9;
6069 break;
6072 scan.channels_list[0] = band;
6073 scan.channels_list[1] = channel;
6074 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6076 /* NOTE: The card will sit on this channel for this time
6077 * period. Scan aborts are timing sensitive and frequently
6078 * result in firmware restarts. As such, it is best to
6079 * set a small dwell_time here and just keep re-issuing
6080 * scans. Otherwise fast channel hopping will not actually
6081 * hop channels.
6083 * TODO: Move SPEED SCAN support to all modes and bands */
6084 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6085 cpu_to_le16(2000);
6086 } else {
6087 #endif /* CONFIG_IPW2200_MONITOR */
6088 /* If we are roaming, then make this a directed scan for the
6089 * current network. Otherwise, ensure that every other scan
6090 * is a fast channel hop scan */
6091 if ((priv->status & STATUS_ROAMING)
6092 || (!(priv->status & STATUS_ASSOCIATED)
6093 && (priv->config & CFG_STATIC_ESSID)
6094 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6095 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6096 if (err) {
6097 IPW_DEBUG_HC("Attempt to send SSID command "
6098 "failed.\n");
6099 goto done;
6102 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6103 } else
6104 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6106 ipw_add_scan_channels(priv, &scan, scan_type);
6107 #ifdef CONFIG_IPW2200_MONITOR
6109 #endif
6111 err = ipw_send_scan_request_ext(priv, &scan);
6112 if (err) {
6113 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6114 goto done;
6117 priv->status |= STATUS_SCANNING;
6118 priv->status &= ~STATUS_SCAN_PENDING;
6119 queue_delayed_work(priv->workqueue, &priv->scan_check,
6120 IPW_SCAN_CHECK_WATCHDOG);
6121 done:
6122 mutex_unlock(&priv->mutex);
6123 return err;
6126 static void ipw_bg_abort_scan(void *data)
6128 struct ipw_priv *priv = data;
6129 mutex_lock(&priv->mutex);
6130 ipw_abort_scan(data);
6131 mutex_unlock(&priv->mutex);
6134 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6136 /* This is called when wpa_supplicant loads and closes the driver
6137 * interface. */
6138 priv->ieee->wpa_enabled = value;
6139 return 0;
6142 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6144 struct ieee80211_device *ieee = priv->ieee;
6145 struct ieee80211_security sec = {
6146 .flags = SEC_AUTH_MODE,
6148 int ret = 0;
6150 if (value & IW_AUTH_ALG_SHARED_KEY) {
6151 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6152 ieee->open_wep = 0;
6153 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6154 sec.auth_mode = WLAN_AUTH_OPEN;
6155 ieee->open_wep = 1;
6156 } else if (value & IW_AUTH_ALG_LEAP) {
6157 sec.auth_mode = WLAN_AUTH_LEAP;
6158 ieee->open_wep = 1;
6159 } else
6160 return -EINVAL;
6162 if (ieee->set_security)
6163 ieee->set_security(ieee->dev, &sec);
6164 else
6165 ret = -EOPNOTSUPP;
6167 return ret;
6170 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6171 int wpa_ie_len)
6173 /* make sure WPA is enabled */
6174 ipw_wpa_enable(priv, 1);
6176 ipw_disassociate(priv);
6179 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6180 char *capabilities, int length)
6182 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6184 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6185 capabilities);
6189 * WE-18 support
6192 /* SIOCSIWGENIE */
6193 static int ipw_wx_set_genie(struct net_device *dev,
6194 struct iw_request_info *info,
6195 union iwreq_data *wrqu, char *extra)
6197 struct ipw_priv *priv = ieee80211_priv(dev);
6198 struct ieee80211_device *ieee = priv->ieee;
6199 u8 *buf;
6200 int err = 0;
6202 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6203 (wrqu->data.length && extra == NULL))
6204 return -EINVAL;
6206 //mutex_lock(&priv->mutex);
6208 //if (!ieee->wpa_enabled) {
6209 // err = -EOPNOTSUPP;
6210 // goto out;
6213 if (wrqu->data.length) {
6214 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6215 if (buf == NULL) {
6216 err = -ENOMEM;
6217 goto out;
6220 memcpy(buf, extra, wrqu->data.length);
6221 kfree(ieee->wpa_ie);
6222 ieee->wpa_ie = buf;
6223 ieee->wpa_ie_len = wrqu->data.length;
6224 } else {
6225 kfree(ieee->wpa_ie);
6226 ieee->wpa_ie = NULL;
6227 ieee->wpa_ie_len = 0;
6230 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6231 out:
6232 //mutex_unlock(&priv->mutex);
6233 return err;
6236 /* SIOCGIWGENIE */
6237 static int ipw_wx_get_genie(struct net_device *dev,
6238 struct iw_request_info *info,
6239 union iwreq_data *wrqu, char *extra)
6241 struct ipw_priv *priv = ieee80211_priv(dev);
6242 struct ieee80211_device *ieee = priv->ieee;
6243 int err = 0;
6245 //mutex_lock(&priv->mutex);
6247 //if (!ieee->wpa_enabled) {
6248 // err = -EOPNOTSUPP;
6249 // goto out;
6252 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6253 wrqu->data.length = 0;
6254 goto out;
6257 if (wrqu->data.length < ieee->wpa_ie_len) {
6258 err = -E2BIG;
6259 goto out;
6262 wrqu->data.length = ieee->wpa_ie_len;
6263 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6265 out:
6266 //mutex_unlock(&priv->mutex);
6267 return err;
6270 static int wext_cipher2level(int cipher)
6272 switch (cipher) {
6273 case IW_AUTH_CIPHER_NONE:
6274 return SEC_LEVEL_0;
6275 case IW_AUTH_CIPHER_WEP40:
6276 case IW_AUTH_CIPHER_WEP104:
6277 return SEC_LEVEL_1;
6278 case IW_AUTH_CIPHER_TKIP:
6279 return SEC_LEVEL_2;
6280 case IW_AUTH_CIPHER_CCMP:
6281 return SEC_LEVEL_3;
6282 default:
6283 return -1;
6287 /* SIOCSIWAUTH */
6288 static int ipw_wx_set_auth(struct net_device *dev,
6289 struct iw_request_info *info,
6290 union iwreq_data *wrqu, char *extra)
6292 struct ipw_priv *priv = ieee80211_priv(dev);
6293 struct ieee80211_device *ieee = priv->ieee;
6294 struct iw_param *param = &wrqu->param;
6295 struct ieee80211_crypt_data *crypt;
6296 unsigned long flags;
6297 int ret = 0;
6299 switch (param->flags & IW_AUTH_INDEX) {
6300 case IW_AUTH_WPA_VERSION:
6301 break;
6302 case IW_AUTH_CIPHER_PAIRWISE:
6303 ipw_set_hw_decrypt_unicast(priv,
6304 wext_cipher2level(param->value));
6305 break;
6306 case IW_AUTH_CIPHER_GROUP:
6307 ipw_set_hw_decrypt_multicast(priv,
6308 wext_cipher2level(param->value));
6309 break;
6310 case IW_AUTH_KEY_MGMT:
6312 * ipw2200 does not use these parameters
6314 break;
6316 case IW_AUTH_TKIP_COUNTERMEASURES:
6317 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6318 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6319 break;
6321 flags = crypt->ops->get_flags(crypt->priv);
6323 if (param->value)
6324 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6325 else
6326 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6328 crypt->ops->set_flags(flags, crypt->priv);
6330 break;
6332 case IW_AUTH_DROP_UNENCRYPTED:{
6333 /* HACK:
6335 * wpa_supplicant calls set_wpa_enabled when the driver
6336 * is loaded and unloaded, regardless of whether WPA is being
6337 * used. No other calls are made which can be used to
6338 * determine whether encryption will be used before
6339 * association is expected. If encryption is not being
6340 * used, drop_unencrypted is set to false, else true -- we
6341 * can use this to determine if the CAP_PRIVACY_ON bit should
6342 * be set.
6344 struct ieee80211_security sec = {
6345 .flags = SEC_ENABLED,
6346 .enabled = param->value,
6348 priv->ieee->drop_unencrypted = param->value;
6349 /* We only change SEC_LEVEL for open mode. Others
6350 * are set by ipw_wpa_set_encryption.
6352 if (!param->value) {
6353 sec.flags |= SEC_LEVEL;
6354 sec.level = SEC_LEVEL_0;
6355 } else {
6356 sec.flags |= SEC_LEVEL;
6357 sec.level = SEC_LEVEL_1;
6359 if (priv->ieee->set_security)
6360 priv->ieee->set_security(priv->ieee->dev, &sec);
6361 break;
6364 case IW_AUTH_80211_AUTH_ALG:
6365 ret = ipw_wpa_set_auth_algs(priv, param->value);
6366 break;
6368 case IW_AUTH_WPA_ENABLED:
6369 ret = ipw_wpa_enable(priv, param->value);
6370 break;
6372 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6373 ieee->ieee802_1x = param->value;
6374 break;
6376 //case IW_AUTH_ROAMING_CONTROL:
6377 case IW_AUTH_PRIVACY_INVOKED:
6378 ieee->privacy_invoked = param->value;
6379 break;
6381 default:
6382 return -EOPNOTSUPP;
6384 return ret;
6387 /* SIOCGIWAUTH */
6388 static int ipw_wx_get_auth(struct net_device *dev,
6389 struct iw_request_info *info,
6390 union iwreq_data *wrqu, char *extra)
6392 struct ipw_priv *priv = ieee80211_priv(dev);
6393 struct ieee80211_device *ieee = priv->ieee;
6394 struct ieee80211_crypt_data *crypt;
6395 struct iw_param *param = &wrqu->param;
6396 int ret = 0;
6398 switch (param->flags & IW_AUTH_INDEX) {
6399 case IW_AUTH_WPA_VERSION:
6400 case IW_AUTH_CIPHER_PAIRWISE:
6401 case IW_AUTH_CIPHER_GROUP:
6402 case IW_AUTH_KEY_MGMT:
6404 * wpa_supplicant will control these internally
6406 ret = -EOPNOTSUPP;
6407 break;
6409 case IW_AUTH_TKIP_COUNTERMEASURES:
6410 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6411 if (!crypt || !crypt->ops->get_flags)
6412 break;
6414 param->value = (crypt->ops->get_flags(crypt->priv) &
6415 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6417 break;
6419 case IW_AUTH_DROP_UNENCRYPTED:
6420 param->value = ieee->drop_unencrypted;
6421 break;
6423 case IW_AUTH_80211_AUTH_ALG:
6424 param->value = ieee->sec.auth_mode;
6425 break;
6427 case IW_AUTH_WPA_ENABLED:
6428 param->value = ieee->wpa_enabled;
6429 break;
6431 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6432 param->value = ieee->ieee802_1x;
6433 break;
6435 case IW_AUTH_ROAMING_CONTROL:
6436 case IW_AUTH_PRIVACY_INVOKED:
6437 param->value = ieee->privacy_invoked;
6438 break;
6440 default:
6441 return -EOPNOTSUPP;
6443 return 0;
6446 /* SIOCSIWENCODEEXT */
6447 static int ipw_wx_set_encodeext(struct net_device *dev,
6448 struct iw_request_info *info,
6449 union iwreq_data *wrqu, char *extra)
6451 struct ipw_priv *priv = ieee80211_priv(dev);
6452 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6454 if (hwcrypto) {
6455 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6456 /* IPW HW can't build TKIP MIC,
6457 host decryption still needed */
6458 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6459 priv->ieee->host_mc_decrypt = 1;
6460 else {
6461 priv->ieee->host_encrypt = 0;
6462 priv->ieee->host_encrypt_msdu = 1;
6463 priv->ieee->host_decrypt = 1;
6465 } else {
6466 priv->ieee->host_encrypt = 0;
6467 priv->ieee->host_encrypt_msdu = 0;
6468 priv->ieee->host_decrypt = 0;
6469 priv->ieee->host_mc_decrypt = 0;
6473 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6476 /* SIOCGIWENCODEEXT */
6477 static int ipw_wx_get_encodeext(struct net_device *dev,
6478 struct iw_request_info *info,
6479 union iwreq_data *wrqu, char *extra)
6481 struct ipw_priv *priv = ieee80211_priv(dev);
6482 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6485 /* SIOCSIWMLME */
6486 static int ipw_wx_set_mlme(struct net_device *dev,
6487 struct iw_request_info *info,
6488 union iwreq_data *wrqu, char *extra)
6490 struct ipw_priv *priv = ieee80211_priv(dev);
6491 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6492 u16 reason;
6494 reason = cpu_to_le16(mlme->reason_code);
6496 switch (mlme->cmd) {
6497 case IW_MLME_DEAUTH:
6498 // silently ignore
6499 break;
6501 case IW_MLME_DISASSOC:
6502 ipw_disassociate(priv);
6503 break;
6505 default:
6506 return -EOPNOTSUPP;
6508 return 0;
6511 #ifdef CONFIG_IPW_QOS
6513 /* QoS */
6515 * get the modulation type of the current network or
6516 * the card's current mode
6518 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6520 u8 mode = 0;
6522 if (priv->status & STATUS_ASSOCIATED) {
6523 unsigned long flags;
6525 spin_lock_irqsave(&priv->ieee->lock, flags);
6526 mode = priv->assoc_network->mode;
6527 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6528 } else {
6529 mode = priv->ieee->mode;
6531 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6532 return mode;
6536 * Handle management frames: beacon and probe response
6538 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6539 int active_network,
6540 struct ieee80211_network *network)
6542 u32 size = sizeof(struct ieee80211_qos_parameters);
6544 if (network->capability & WLAN_CAPABILITY_IBSS)
6545 network->qos_data.active = network->qos_data.supported;
6547 if (network->flags & NETWORK_HAS_QOS_MASK) {
6548 if (active_network &&
6549 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6550 network->qos_data.active = network->qos_data.supported;
6552 if ((network->qos_data.active == 1) && (active_network == 1) &&
6553 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6554 (network->qos_data.old_param_count !=
6555 network->qos_data.param_count)) {
6556 network->qos_data.old_param_count =
6557 network->qos_data.param_count;
6558 schedule_work(&priv->qos_activate);
6559 IPW_DEBUG_QOS("QoS parameters change call "
6560 "qos_activate\n");
6562 } else {
6563 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6564 memcpy(&network->qos_data.parameters,
6565 &def_parameters_CCK, size);
6566 else
6567 memcpy(&network->qos_data.parameters,
6568 &def_parameters_OFDM, size);
6570 if ((network->qos_data.active == 1) && (active_network == 1)) {
6571 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6572 schedule_work(&priv->qos_activate);
6575 network->qos_data.active = 0;
6576 network->qos_data.supported = 0;
6578 if ((priv->status & STATUS_ASSOCIATED) &&
6579 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6580 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6581 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6582 !(network->flags & NETWORK_EMPTY_ESSID))
6583 if ((network->ssid_len ==
6584 priv->assoc_network->ssid_len) &&
6585 !memcmp(network->ssid,
6586 priv->assoc_network->ssid,
6587 network->ssid_len)) {
6588 queue_work(priv->workqueue,
6589 &priv->merge_networks);
6593 return 0;
6597 * This function sets up the firmware to support QoS. It sends
6598 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6600 static int ipw_qos_activate(struct ipw_priv *priv,
6601 struct ieee80211_qos_data *qos_network_data)
6603 int err;
6604 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6605 struct ieee80211_qos_parameters *active_one = NULL;
6606 u32 size = sizeof(struct ieee80211_qos_parameters);
6607 u32 burst_duration;
6608 int i;
6609 u8 type;
6611 type = ipw_qos_current_mode(priv);
6613 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6614 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6615 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6616 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6618 if (qos_network_data == NULL) {
6619 if (type == IEEE_B) {
6620 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6621 active_one = &def_parameters_CCK;
6622 } else
6623 active_one = &def_parameters_OFDM;
6625 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6626 burst_duration = ipw_qos_get_burst_duration(priv);
6627 for (i = 0; i < QOS_QUEUE_NUM; i++)
6628 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6629 (u16) burst_duration;
6630 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6631 if (type == IEEE_B) {
6632 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6633 type);
6634 if (priv->qos_data.qos_enable == 0)
6635 active_one = &def_parameters_CCK;
6636 else
6637 active_one = priv->qos_data.def_qos_parm_CCK;
6638 } else {
6639 if (priv->qos_data.qos_enable == 0)
6640 active_one = &def_parameters_OFDM;
6641 else
6642 active_one = priv->qos_data.def_qos_parm_OFDM;
6644 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6645 } else {
6646 unsigned long flags;
6647 int active;
6649 spin_lock_irqsave(&priv->ieee->lock, flags);
6650 active_one = &(qos_network_data->parameters);
6651 qos_network_data->old_param_count =
6652 qos_network_data->param_count;
6653 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6654 active = qos_network_data->supported;
6655 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6657 if (active == 0) {
6658 burst_duration = ipw_qos_get_burst_duration(priv);
6659 for (i = 0; i < QOS_QUEUE_NUM; i++)
6660 qos_parameters[QOS_PARAM_SET_ACTIVE].
6661 tx_op_limit[i] = (u16) burst_duration;
6665 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6666 err = ipw_send_qos_params_command(priv,
6667 (struct ieee80211_qos_parameters *)
6668 &(qos_parameters[0]));
6669 if (err)
6670 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6672 return err;
6676 * send IPW_CMD_WME_INFO to the firmware
6678 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6680 int ret = 0;
6681 struct ieee80211_qos_information_element qos_info;
6683 if (priv == NULL)
6684 return -1;
6686 qos_info.elementID = QOS_ELEMENT_ID;
6687 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6689 qos_info.version = QOS_VERSION_1;
6690 qos_info.ac_info = 0;
6692 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6693 qos_info.qui_type = QOS_OUI_TYPE;
6694 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6696 ret = ipw_send_qos_info_command(priv, &qos_info);
6697 if (ret != 0) {
6698 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6700 return ret;
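/*
 * Editorial sketch (not part of the driver): the element built above follows
 * the standard WMM information element layout.  Assuming the usual values
 * behind the QOS_* constants (vendor-specific element ID 0xdd, Microsoft OUI
 * 00:50:f2, OUI type 2, subtype 0, version 1), the nine octets on the air
 * would look like the illustrative array below.
 */
static const u8 example_wmm_info_ie[] = {
	0xdd,			/* vendor-specific element ID         */
	0x07,			/* length of the payload that follows */
	0x00, 0x50, 0xf2,	/* OUI                                */
	0x02,			/* OUI type: WMM                      */
	0x00,			/* OUI subtype: information element   */
	0x01,			/* WMM version                        */
	0x00,			/* QoS info (ac_info above)           */
};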
6704 * Set the QoS parameters in the association request structure
6706 static int ipw_qos_association(struct ipw_priv *priv,
6707 struct ieee80211_network *network)
6709 int err = 0;
6710 struct ieee80211_qos_data *qos_data = NULL;
6711 struct ieee80211_qos_data ibss_data = {
6712 .supported = 1,
6713 .active = 1,
6716 switch (priv->ieee->iw_mode) {
6717 case IW_MODE_ADHOC:
6718 if (!(network->capability & WLAN_CAPABILITY_IBSS))
6719 BUG();
6721 qos_data = &ibss_data;
6722 break;
6724 case IW_MODE_INFRA:
6725 qos_data = &network->qos_data;
6726 break;
6728 default:
6729 BUG();
6730 break;
6733 err = ipw_qos_activate(priv, qos_data);
6734 if (err) {
6735 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6736 return err;
6739 if (priv->qos_data.qos_enable && qos_data->supported) {
6740 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6741 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6742 return ipw_qos_set_info_element(priv);
6745 return 0;
6749 * handle the beacon responses; if the QoS setting advertised by the
6750 * network differs from the setting negotiated at association time,
6751 * adjust the QoS setting
6753 static int ipw_qos_association_resp(struct ipw_priv *priv,
6754 struct ieee80211_network *network)
6756 int ret = 0;
6757 unsigned long flags;
6758 u32 size = sizeof(struct ieee80211_qos_parameters);
6759 int set_qos_param = 0;
6761 if ((priv == NULL) || (network == NULL) ||
6762 (priv->assoc_network == NULL))
6763 return ret;
6765 if (!(priv->status & STATUS_ASSOCIATED))
6766 return ret;
6768 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6769 return ret;
6771 spin_lock_irqsave(&priv->ieee->lock, flags);
6772 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6773 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6774 sizeof(struct ieee80211_qos_data));
6775 priv->assoc_network->qos_data.active = 1;
6776 if ((network->qos_data.old_param_count !=
6777 network->qos_data.param_count)) {
6778 set_qos_param = 1;
6779 network->qos_data.old_param_count =
6780 network->qos_data.param_count;
6783 } else {
6784 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6785 memcpy(&priv->assoc_network->qos_data.parameters,
6786 &def_parameters_CCK, size);
6787 else
6788 memcpy(&priv->assoc_network->qos_data.parameters,
6789 &def_parameters_OFDM, size);
6790 priv->assoc_network->qos_data.active = 0;
6791 priv->assoc_network->qos_data.supported = 0;
6792 set_qos_param = 1;
6795 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6797 if (set_qos_param == 1)
6798 schedule_work(&priv->qos_activate);
6800 return ret;
6803 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6805 u32 ret = 0;
6807 if ((priv == NULL))
6808 return 0;
6810 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6811 ret = priv->qos_data.burst_duration_CCK;
6812 else
6813 ret = priv->qos_data.burst_duration_OFDM;
6815 return ret;
6819 * Initialize the global QoS settings
6821 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6822 int burst_enable, u32 burst_duration_CCK,
6823 u32 burst_duration_OFDM)
6825 priv->qos_data.qos_enable = enable;
6827 if (priv->qos_data.qos_enable) {
6828 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6829 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6830 IPW_DEBUG_QOS("QoS is enabled\n");
6831 } else {
6832 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6833 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6834 IPW_DEBUG_QOS("QoS is not enabled\n");
6837 priv->qos_data.burst_enable = burst_enable;
6839 if (burst_enable) {
6840 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6841 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6842 } else {
6843 priv->qos_data.burst_duration_CCK = 0;
6844 priv->qos_data.burst_duration_OFDM = 0;
6849 * map the packet priority to the right TX Queue
6851 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6853 if (priority > 7 || !priv->qos_data.qos_enable)
6854 priority = 0;
6856 return from_priority_to_tx_queue[priority] - 1;
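/*
 * Editorial sketch (not part of the driver): from_priority_to_tx_queue is
 * defined earlier in this file and maps an 802.11e user priority (0-7) to a
 * 1-based TX queue number.  The array below is only an illustration of the
 * conventional UP -> access category mapping (BK: 1,2  BE: 0,3  VI: 4,5
 * VO: 6,7); the driver's own table may use different queue numbers.
 */
static const u8 example_up_to_queue[8] = {
	/* UP 0 */ 2,	/* best effort */
	/* UP 1 */ 1,	/* background  */
	/* UP 2 */ 1,	/* background  */
	/* UP 3 */ 2,	/* best effort */
	/* UP 4 */ 3,	/* video       */
	/* UP 5 */ 3,	/* video       */
	/* UP 6 */ 4,	/* voice       */
	/* UP 7 */ 4,	/* voice       */
};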
6860 * add QoS parameter to the TX command
6862 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6863 u16 priority,
6864 struct tfd_data *tfd, u8 unicast)
6866 int ret = 0;
6867 int tx_queue_id = 0;
6868 struct ieee80211_qos_data *qos_data = NULL;
6869 int active, supported;
6870 unsigned long flags;
6872 if (!(priv->status & STATUS_ASSOCIATED))
6873 return 0;
6875 qos_data = &priv->assoc_network->qos_data;
6877 spin_lock_irqsave(&priv->ieee->lock, flags);
6879 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6880 if (unicast == 0)
6881 qos_data->active = 0;
6882 else
6883 qos_data->active = qos_data->supported;
6886 active = qos_data->active;
6887 supported = qos_data->supported;
6889 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6891 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6892 "unicast %d\n",
6893 priv->qos_data.qos_enable, active, supported, unicast);
6894 if (active && priv->qos_data.qos_enable) {
6895 ret = from_priority_to_tx_queue[priority];
6896 tx_queue_id = ret - 1;
6897 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
6898 if (priority <= 7) {
6899 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6900 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
6901 tfd->tfd.tfd_26.mchdr.frame_ctl |=
6902 IEEE80211_STYPE_QOS_DATA;
6904 if (priv->qos_data.qos_no_ack_mask &
6905 (1UL << tx_queue_id)) {
6906 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6907 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
6908 CTRL_QOS_NO_ACK;
6913 return ret;
6917 * background worker that runs the QoS activate functionality
6919 static void ipw_bg_qos_activate(void *data)
6921 struct ipw_priv *priv = data;
6923 if (priv == NULL)
6924 return;
6926 mutex_lock(&priv->mutex);
6928 if (priv->status & STATUS_ASSOCIATED)
6929 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6931 mutex_unlock(&priv->mutex);
6934 static int ipw_handle_probe_response(struct net_device *dev,
6935 struct ieee80211_probe_response *resp,
6936 struct ieee80211_network *network)
6938 struct ipw_priv *priv = ieee80211_priv(dev);
6939 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6940 (network == priv->assoc_network));
6942 ipw_qos_handle_probe_response(priv, active_network, network);
6944 return 0;
6947 static int ipw_handle_beacon(struct net_device *dev,
6948 struct ieee80211_beacon *resp,
6949 struct ieee80211_network *network)
6951 struct ipw_priv *priv = ieee80211_priv(dev);
6952 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6953 (network == priv->assoc_network));
6955 ipw_qos_handle_probe_response(priv, active_network, network);
6957 return 0;
6960 static int ipw_handle_assoc_response(struct net_device *dev,
6961 struct ieee80211_assoc_response *resp,
6962 struct ieee80211_network *network)
6964 struct ipw_priv *priv = ieee80211_priv(dev);
6965 ipw_qos_association_resp(priv, network);
6966 return 0;
6969 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
6970 *qos_param)
6972 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
6973 sizeof(*qos_param) * 3, qos_param);
6976 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
6977 *qos_param)
6979 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
6980 qos_param);
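/*
 * Editorial sketch (not part of the driver): IPW_CMD_QOS_PARAMETERS carries
 * three back-to-back ieee80211_qos_parameters blocks, matching the array
 * filled in ipw_qos_activate() above and the sizeof(*qos_param) * 3 payload
 * length used when sending the command.  The struct below only illustrates
 * that layout.
 */
struct example_qos_param_cmd {
	struct ieee80211_qos_parameters def_cck;	/* QOS_PARAM_SET_DEF_CCK  */
	struct ieee80211_qos_parameters def_ofdm;	/* QOS_PARAM_SET_DEF_OFDM */
	struct ieee80211_qos_parameters active;		/* QOS_PARAM_SET_ACTIVE   */
};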
6983 #endif /* CONFIG_IPW_QOS */
6985 static int ipw_associate_network(struct ipw_priv *priv,
6986 struct ieee80211_network *network,
6987 struct ipw_supported_rates *rates, int roaming)
6989 int err;
6991 if (priv->config & CFG_FIXED_RATE)
6992 ipw_set_fixed_rate(priv, network->mode);
6994 if (!(priv->config & CFG_STATIC_ESSID)) {
6995 priv->essid_len = min(network->ssid_len,
6996 (u8) IW_ESSID_MAX_SIZE);
6997 memcpy(priv->essid, network->ssid, priv->essid_len);
7000 network->last_associate = jiffies;
7002 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7003 priv->assoc_request.channel = network->channel;
7004 priv->assoc_request.auth_key = 0;
7006 if ((priv->capability & CAP_PRIVACY_ON) &&
7007 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7008 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7009 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7011 if (priv->ieee->sec.level == SEC_LEVEL_1)
7012 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7014 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7015 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7016 priv->assoc_request.auth_type = AUTH_LEAP;
7017 else
7018 priv->assoc_request.auth_type = AUTH_OPEN;
7020 if (priv->ieee->wpa_ie_len) {
7021 priv->assoc_request.policy_support = 0x02; /* RSN active */
7022 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7023 priv->ieee->wpa_ie_len);
7027 * It is valid for our ieee device to support multiple modes, but
7028 * when it comes to associating to a given network we have to choose
7029 * just one mode.
7031 if (network->mode & priv->ieee->mode & IEEE_A)
7032 priv->assoc_request.ieee_mode = IPW_A_MODE;
7033 else if (network->mode & priv->ieee->mode & IEEE_G)
7034 priv->assoc_request.ieee_mode = IPW_G_MODE;
7035 else if (network->mode & priv->ieee->mode & IEEE_B)
7036 priv->assoc_request.ieee_mode = IPW_B_MODE;
7038 priv->assoc_request.capability = network->capability;
7039 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7040 && !(priv->config & CFG_PREAMBLE_LONG)) {
7041 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7042 } else {
7043 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7045 /* Clear the short preamble if we won't be supporting it */
7046 priv->assoc_request.capability &=
7047 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7050 /* Clear capability bits that aren't used in Ad Hoc */
7051 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7052 priv->assoc_request.capability &=
7053 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7055 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7056 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7057 roaming ? "Rea" : "A",
7058 escape_essid(priv->essid, priv->essid_len),
7059 network->channel,
7060 ipw_modes[priv->assoc_request.ieee_mode],
7061 rates->num_rates,
7062 (priv->assoc_request.preamble_length ==
7063 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7064 network->capability &
7065 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7066 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7067 priv->capability & CAP_PRIVACY_ON ?
7068 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7069 "(open)") : "",
7070 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7071 priv->capability & CAP_PRIVACY_ON ?
7072 '1' + priv->ieee->sec.active_key : '.',
7073 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7075 priv->assoc_request.beacon_interval = network->beacon_interval;
7076 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7077 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7078 priv->assoc_request.assoc_type = HC_IBSS_START;
7079 priv->assoc_request.assoc_tsf_msw = 0;
7080 priv->assoc_request.assoc_tsf_lsw = 0;
7081 } else {
7082 if (unlikely(roaming))
7083 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7084 else
7085 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7086 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7087 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7090 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7092 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7093 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7094 priv->assoc_request.atim_window = network->atim_window;
7095 } else {
7096 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7097 priv->assoc_request.atim_window = 0;
7100 priv->assoc_request.listen_interval = network->listen_interval;
7102 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7103 if (err) {
7104 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7105 return err;
7108 rates->ieee_mode = priv->assoc_request.ieee_mode;
7109 rates->purpose = IPW_RATE_CONNECT;
7110 ipw_send_supported_rates(priv, rates);
7112 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7113 priv->sys_config.dot11g_auto_detection = 1;
7114 else
7115 priv->sys_config.dot11g_auto_detection = 0;
7117 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7118 priv->sys_config.answer_broadcast_ssid_probe = 1;
7119 else
7120 priv->sys_config.answer_broadcast_ssid_probe = 0;
7122 err = ipw_send_system_config(priv, &priv->sys_config);
7123 if (err) {
7124 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7125 return err;
7128 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7129 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7130 if (err) {
7131 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7132 return err;
7136 * If preemption is enabled, it is possible for the association
7137 * to complete before we return from ipw_send_associate. Therefore
7138 * we have to be sure to update our private data first.
7140 priv->channel = network->channel;
7141 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7142 priv->status |= STATUS_ASSOCIATING;
7143 priv->status &= ~STATUS_SECURITY_UPDATED;
7145 priv->assoc_network = network;
7147 #ifdef CONFIG_IPW_QOS
7148 ipw_qos_association(priv, network);
7149 #endif
7151 err = ipw_send_associate(priv, &priv->assoc_request);
7152 if (err) {
7153 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7154 return err;
7157 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7158 escape_essid(priv->essid, priv->essid_len),
7159 MAC_ARG(priv->bssid));
7161 return 0;
7164 static void ipw_roam(void *data)
7166 struct ipw_priv *priv = data;
7167 struct ieee80211_network *network = NULL;
7168 struct ipw_network_match match = {
7169 .network = priv->assoc_network
7172 /* The roaming process is as follows:
7174 * 1. Missed beacon threshold triggers the roaming process by
7175 * setting the status ROAM bit and requesting a scan.
7176 * 2. When the scan completes, it schedules the ROAM work
7177 * 3. The ROAM work looks at all of the known networks for one that
7178 * is a better network than the currently associated. If none
7179 * found, the ROAM process is over (ROAM bit cleared)
7180 * 4. If a better network is found, a disassociation request is
7181 * sent.
7182 * 5. When the disassociation completes, the roam work is again
7183 * scheduled. The second time through, the driver is no longer
7184 * associated, and the newly selected network is sent an
7185 * association request.
7186 * 6. At this point, the roaming process is complete and the ROAM
7187 * status bit is cleared.
7190 /* If we are no longer associated, and the roaming bit is no longer
7191 * set, then we are not actively roaming, so just return */
7192 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7193 return;
7195 if (priv->status & STATUS_ASSOCIATED) {
7196 /* First pass through ROAM process -- look for a better
7197 * network */
7198 unsigned long flags;
7199 u8 rssi = priv->assoc_network->stats.rssi;
7200 priv->assoc_network->stats.rssi = -128;
7201 spin_lock_irqsave(&priv->ieee->lock, flags);
7202 list_for_each_entry(network, &priv->ieee->network_list, list) {
7203 if (network != priv->assoc_network)
7204 ipw_best_network(priv, &match, network, 1);
7206 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7207 priv->assoc_network->stats.rssi = rssi;
7209 if (match.network == priv->assoc_network) {
7210 IPW_DEBUG_ASSOC("No better APs in this network to "
7211 "roam to.\n");
7212 priv->status &= ~STATUS_ROAMING;
7213 ipw_debug_config(priv);
7214 return;
7217 ipw_send_disassociate(priv, 1);
7218 priv->assoc_network = match.network;
7220 return;
7223 /* Second pass through ROAM process -- request association */
7224 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7225 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7226 priv->status &= ~STATUS_ROAMING;
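/*
 * Editorial note (not part of the driver): the two passes of ipw_roam()
 * above reduce to a small state machine; this is only a summary of the flow
 * documented in the numbered comment at the top of the function.
 *
 *   ASSOCIATED + ROAMING --(better AP found)--> disassociate, keep ROAMING
 *   ASSOCIATED + ROAMING --(no better AP)-----> clear ROAMING, done
 *  !ASSOCIATED + ROAMING --------------------> associate to match, clear ROAMING
 */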
7229 static void ipw_bg_roam(void *data)
7231 struct ipw_priv *priv = data;
7232 mutex_lock(&priv->mutex);
7233 ipw_roam(data);
7234 mutex_unlock(&priv->mutex);
7237 static int ipw_associate(void *data)
7239 struct ipw_priv *priv = data;
7241 struct ieee80211_network *network = NULL;
7242 struct ipw_network_match match = {
7243 .network = NULL
7245 struct ipw_supported_rates *rates;
7246 struct list_head *element;
7247 unsigned long flags;
7249 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7250 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7251 return 0;
7254 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7255 IPW_DEBUG_ASSOC("Not attempting association (already in "
7256 "progress)\n");
7257 return 0;
7260 if (priv->status & STATUS_DISASSOCIATING) {
7261 IPW_DEBUG_ASSOC("Not attempting association (in "
7262 "disassociating)\n ");
7263 queue_work(priv->workqueue, &priv->associate);
7264 return 0;
7267 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7268 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7269 "initialized)\n");
7270 return 0;
7273 if (!(priv->config & CFG_ASSOCIATE) &&
7274 !(priv->config & (CFG_STATIC_ESSID |
7275 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7276 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7277 return 0;
7280 /* Protect our use of the network_list */
7281 spin_lock_irqsave(&priv->ieee->lock, flags);
7282 list_for_each_entry(network, &priv->ieee->network_list, list)
7283 ipw_best_network(priv, &match, network, 0);
7285 network = match.network;
7286 rates = &match.rates;
7288 if (network == NULL &&
7289 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7290 priv->config & CFG_ADHOC_CREATE &&
7291 priv->config & CFG_STATIC_ESSID &&
7292 priv->config & CFG_STATIC_CHANNEL &&
7293 !list_empty(&priv->ieee->network_free_list)) {
7294 element = priv->ieee->network_free_list.next;
7295 network = list_entry(element, struct ieee80211_network, list);
7296 ipw_adhoc_create(priv, network);
7297 rates = &priv->rates;
7298 list_del(element);
7299 list_add_tail(&network->list, &priv->ieee->network_list);
7301 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7303 /* If we reached the end of the list, then we don't have any valid
7304 * matching APs */
7305 if (!network) {
7306 ipw_debug_config(priv);
7308 if (!(priv->status & STATUS_SCANNING)) {
7309 if (!(priv->config & CFG_SPEED_SCAN))
7310 queue_delayed_work(priv->workqueue,
7311 &priv->request_scan,
7312 SCAN_INTERVAL);
7313 else
7314 queue_work(priv->workqueue,
7315 &priv->request_scan);
7318 return 0;
7321 ipw_associate_network(priv, network, rates, 0);
7323 return 1;
7326 static void ipw_bg_associate(void *data)
7328 struct ipw_priv *priv = data;
7329 mutex_lock(&priv->mutex);
7330 ipw_associate(data);
7331 mutex_unlock(&priv->mutex);
7334 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7335 struct sk_buff *skb)
7337 struct ieee80211_hdr *hdr;
7338 u16 fc;
7340 hdr = (struct ieee80211_hdr *)skb->data;
7341 fc = le16_to_cpu(hdr->frame_ctl);
7342 if (!(fc & IEEE80211_FCTL_PROTECTED))
7343 return;
7345 fc &= ~IEEE80211_FCTL_PROTECTED;
7346 hdr->frame_ctl = cpu_to_le16(fc);
7347 switch (priv->ieee->sec.level) {
7348 case SEC_LEVEL_3:
7349 /* Remove CCMP HDR */
7350 memmove(skb->data + IEEE80211_3ADDR_LEN,
7351 skb->data + IEEE80211_3ADDR_LEN + 8,
7352 skb->len - IEEE80211_3ADDR_LEN - 8);
7353 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7354 break;
7355 case SEC_LEVEL_2:
7356 break;
7357 case SEC_LEVEL_1:
7358 /* Remove IV */
7359 memmove(skb->data + IEEE80211_3ADDR_LEN,
7360 skb->data + IEEE80211_3ADDR_LEN + 4,
7361 skb->len - IEEE80211_3ADDR_LEN - 4);
7362 skb_trim(skb, skb->len - 8); /* IV + ICV */
7363 break;
7364 case SEC_LEVEL_0:
7365 break;
7366 default:
7367 printk(KERN_ERR "Unknown security level %d\n",
7368 priv->ieee->sec.level);
7369 break;
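/*
 * Editorial sketch (not part of the driver): the trimming above assumes the
 * usual protected data-frame layouts with a 24-byte three-address header
 * (IEEE80211_3ADDR_LEN):
 *
 *   CCMP: [hdr 24][CCMP hdr 8][payload][MIC 8] -> memmove the payload over
 *         the CCMP header, then drop 16 bytes total (8 header + 8 MIC).
 *   WEP:  [hdr 24][IV 4][payload][ICV 4]       -> memmove the payload over
 *         the IV, then drop 8 bytes total (4 IV + 4 ICV).
 *
 * The hypothetical helper below just restates those length adjustments.
 */
static inline unsigned int example_stripped_len(unsigned int skb_len, int level)
{
	if (level == SEC_LEVEL_3)
		return skb_len - 16;	/* 8-byte CCMP header + 8-byte MIC */
	if (level == SEC_LEVEL_1)
		return skb_len - 8;	/* 4-byte IV + 4-byte ICV */
	return skb_len;
}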
7373 static void ipw_handle_data_packet(struct ipw_priv *priv,
7374 struct ipw_rx_mem_buffer *rxb,
7375 struct ieee80211_rx_stats *stats)
7377 struct ieee80211_hdr_4addr *hdr;
7378 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7380 /* We received data from the HW, so stop the watchdog */
7381 priv->net_dev->trans_start = jiffies;
7383 /* We only process data packets if the
7384 * interface is open */
7385 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7386 skb_tailroom(rxb->skb))) {
7387 priv->ieee->stats.rx_errors++;
7388 priv->wstats.discard.misc++;
7389 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7390 return;
7391 } else if (unlikely(!netif_running(priv->net_dev))) {
7392 priv->ieee->stats.rx_dropped++;
7393 priv->wstats.discard.misc++;
7394 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7395 return;
7398 /* Advance skb->data to the start of the actual payload */
7399 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7401 /* Set the size of the skb to the size of the frame */
7402 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7404 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7406 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7407 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7408 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7409 (is_multicast_ether_addr(hdr->addr1) ?
7410 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7411 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7413 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7414 priv->ieee->stats.rx_errors++;
7415 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7416 rxb->skb = NULL;
7417 __ipw_led_activity_on(priv);
7421 #ifdef CONFIG_IEEE80211_RADIOTAP
7422 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7423 struct ipw_rx_mem_buffer *rxb,
7424 struct ieee80211_rx_stats *stats)
7426 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7427 struct ipw_rx_frame *frame = &pkt->u.frame;
7429 /* initial pull of some data */
7430 u16 received_channel = frame->received_channel;
7431 u8 antennaAndPhy = frame->antennaAndPhy;
7432 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7433 u16 pktrate = frame->rate;
7435 /* Magic struct that slots into the radiotap header -- no reason
7436 * to build this manually element by element, we can write it much
7437 * more efficiently than we can parse it. ORDER MATTERS HERE */
7438 struct ipw_rt_hdr {
7439 struct ieee80211_radiotap_header rt_hdr;
7440 u8 rt_flags; /* radiotap packet flags */
7441 u8 rt_rate; /* rate in 500kb/s */
7442 u16 rt_channel; /* channel in MHz */
7443 u16 rt_chbitmask; /* channel bitfield */
7444 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
7445 u8 rt_antenna; /* antenna number */
7446 } *ipw_rt;
7448 short len = le16_to_cpu(pkt->u.frame.length);
7450 /* We received data from the HW, so stop the watchdog */
7451 priv->net_dev->trans_start = jiffies;
7453 /* We only process data packets if the
7454 * interface is open */
7455 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7456 skb_tailroom(rxb->skb))) {
7457 priv->ieee->stats.rx_errors++;
7458 priv->wstats.discard.misc++;
7459 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7460 return;
7461 } else if (unlikely(!netif_running(priv->net_dev))) {
7462 priv->ieee->stats.rx_dropped++;
7463 priv->wstats.discard.misc++;
7464 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7465 return;
7468 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7469 * that now */
7470 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7471 /* FIXME: Should alloc bigger skb instead */
7472 priv->ieee->stats.rx_dropped++;
7473 priv->wstats.discard.misc++;
7474 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7475 return;
7478 /* copy the frame itself */
7479 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7480 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7482 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7483 * part of our real header, saves a little time.
7485 * No longer necessary since we fill in all our data. Purge before merging
7486 * patch officially.
7487 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7488 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7491 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7493 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7494 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7495 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7497 /* Big bitfield of all the fields we provide in radiotap */
7498 ipw_rt->rt_hdr.it_present =
7499 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7500 (1 << IEEE80211_RADIOTAP_RATE) |
7501 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7502 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7503 (1 << IEEE80211_RADIOTAP_ANTENNA));
7505 /* Zero the flags, we'll add to them as we go */
7506 ipw_rt->rt_flags = 0;
7508 /* Convert signal to DBM */
7509 ipw_rt->rt_dbmsignal = antsignal;
7511 /* Convert the channel data and set the flags */
7512 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7513 if (received_channel > 14) { /* 802.11a */
7514 ipw_rt->rt_chbitmask =
7515 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7516 } else if (antennaAndPhy & 32) { /* 802.11b */
7517 ipw_rt->rt_chbitmask =
7518 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7519 } else { /* 802.11g */
7520 ipw_rt->rt_chbitmask =
7521 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7524 /* set the rate in multiples of 500k/s */
7525 switch (pktrate) {
7526 case IPW_TX_RATE_1MB:
7527 ipw_rt->rt_rate = 2;
7528 break;
7529 case IPW_TX_RATE_2MB:
7530 ipw_rt->rt_rate = 4;
7531 break;
7532 case IPW_TX_RATE_5MB:
7533 ipw_rt->rt_rate = 10;
7534 break;
7535 case IPW_TX_RATE_6MB:
7536 ipw_rt->rt_rate = 12;
7537 break;
7538 case IPW_TX_RATE_9MB:
7539 ipw_rt->rt_rate = 18;
7540 break;
7541 case IPW_TX_RATE_11MB:
7542 ipw_rt->rt_rate = 22;
7543 break;
7544 case IPW_TX_RATE_12MB:
7545 ipw_rt->rt_rate = 24;
7546 break;
7547 case IPW_TX_RATE_18MB:
7548 ipw_rt->rt_rate = 36;
7549 break;
7550 case IPW_TX_RATE_24MB:
7551 ipw_rt->rt_rate = 48;
7552 break;
7553 case IPW_TX_RATE_36MB:
7554 ipw_rt->rt_rate = 72;
7555 break;
7556 case IPW_TX_RATE_48MB:
7557 ipw_rt->rt_rate = 96;
7558 break;
7559 case IPW_TX_RATE_54MB:
7560 ipw_rt->rt_rate = 108;
7561 break;
7562 default:
7563 ipw_rt->rt_rate = 0;
7564 break;
7567 /* antenna number */
7568 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7570 /* set the preamble flag if we have it */
7571 if ((antennaAndPhy & 64))
7572 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7574 /* Set the size of the skb to the size of the frame */
7575 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7577 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7579 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7580 priv->ieee->stats.rx_errors++;
7581 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7582 rxb->skb = NULL;
7583 /* no LED during capture */
7586 #endif
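/*
 * Editorial note (not part of the driver): in radiotap, the data fields that
 * follow the header must appear in the order of the bits set in it_present
 * (lowest bit first), each aligned to its natural size.  The ipw_rt_hdr
 * layout above follows that rule for the five fields it advertises:
 *
 *   bit  1 FLAGS         -> u8  rt_flags
 *   bit  2 RATE          -> u8  rt_rate
 *   bit  3 CHANNEL       -> u16 rt_channel + u16 rt_chbitmask
 *   bit  5 DBM_ANTSIGNAL -> s8  rt_dbmsignal
 *   bit 11 ANTENNA       -> u8  rt_antenna
 */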
7588 static int is_network_packet(struct ipw_priv *priv,
7589 struct ieee80211_hdr_4addr *header)
7591 /* Filter incoming packets to determine if they are targeted toward
7592 * this network, discarding packets coming from ourselves */
7593 switch (priv->ieee->iw_mode) {
7594 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7595 /* packets from our adapter are dropped (echo) */
7596 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7597 return 0;
7599 /* {broad,multi}cast packets to our BSSID go through */
7600 if (is_multicast_ether_addr(header->addr1))
7601 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7603 /* packets to our adapter go through */
7604 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7605 ETH_ALEN);
7607 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7608 /* packets from our adapter are dropped (echo) */
7609 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7610 return 0;
7612 /* {broad,multi}cast packets to our BSS go through */
7613 if (is_multicast_ether_addr(header->addr1))
7614 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7616 /* packets to our adapter go through */
7617 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7618 ETH_ALEN);
7621 return 1;
7624 #define IPW_PACKET_RETRY_TIME HZ
7626 static int is_duplicate_packet(struct ipw_priv *priv,
7627 struct ieee80211_hdr_4addr *header)
7629 u16 sc = le16_to_cpu(header->seq_ctl);
7630 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7631 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7632 u16 *last_seq, *last_frag;
7633 unsigned long *last_time;
7635 switch (priv->ieee->iw_mode) {
7636 case IW_MODE_ADHOC:
7638 struct list_head *p;
7639 struct ipw_ibss_seq *entry = NULL;
7640 u8 *mac = header->addr2;
7641 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7643 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7644 entry =
7645 list_entry(p, struct ipw_ibss_seq, list);
7646 if (!memcmp(entry->mac, mac, ETH_ALEN))
7647 break;
7649 if (p == &priv->ibss_mac_hash[index]) {
7650 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7651 if (!entry) {
7652 IPW_ERROR
7653 ("Cannot malloc new mac entry\n");
7654 return 0;
7656 memcpy(entry->mac, mac, ETH_ALEN);
7657 entry->seq_num = seq;
7658 entry->frag_num = frag;
7659 entry->packet_time = jiffies;
7660 list_add(&entry->list,
7661 &priv->ibss_mac_hash[index]);
7662 return 0;
7664 last_seq = &entry->seq_num;
7665 last_frag = &entry->frag_num;
7666 last_time = &entry->packet_time;
7667 break;
7669 case IW_MODE_INFRA:
7670 last_seq = &priv->last_seq_num;
7671 last_frag = &priv->last_frag_num;
7672 last_time = &priv->last_packet_time;
7673 break;
7674 default:
7675 return 0;
7677 if ((*last_seq == seq) &&
7678 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7679 if (*last_frag == frag)
7680 goto drop;
7681 if (*last_frag + 1 != frag)
7682 /* out-of-order fragment */
7683 goto drop;
7684 } else
7685 *last_seq = seq;
7687 *last_frag = frag;
7688 *last_time = jiffies;
7689 return 0;
7691 drop:
7692 /* The BUG_ON below stays commented out: we have observed the card
7693 * receiving duplicate packets without the FCTL_RETRY bit set in
7694 * IBSS mode with fragmentation enabled.
7695 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7696 return 1;
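/*
 * Editorial sketch (not part of the driver): the 16-bit sequence control
 * field decoded above packs the fragment number in its low four bits and the
 * sequence number in the remaining twelve, which is what the
 * WLAN_GET_SEQ_FRAG/WLAN_GET_SEQ_SEQ macros extract.  The helpers below only
 * illustrate that split.
 */
static inline u16 example_seq_from_sc(u16 sc)
{
	return (sc & 0xfff0) >> 4;	/* 12-bit sequence number */
}

static inline u16 example_frag_from_sc(u16 sc)
{
	return sc & 0x000f;		/* 4-bit fragment number */
}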
7699 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7700 struct ipw_rx_mem_buffer *rxb,
7701 struct ieee80211_rx_stats *stats)
7703 struct sk_buff *skb = rxb->skb;
7704 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7705 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7706 (skb->data + IPW_RX_FRAME_SIZE);
7708 ieee80211_rx_mgt(priv->ieee, header, stats);
7710 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7711 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7712 IEEE80211_STYPE_PROBE_RESP) ||
7713 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7714 IEEE80211_STYPE_BEACON))) {
7715 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7716 ipw_add_station(priv, header->addr2);
7719 if (priv->config & CFG_NET_STATS) {
7720 IPW_DEBUG_HC("sending stat packet\n");
7722 /* Set the size of the skb to the size of the full
7723 * ipw header and 802.11 frame */
7724 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7725 IPW_RX_FRAME_SIZE);
7727 /* Advance past the ipw packet header to the 802.11 frame */
7728 skb_pull(skb, IPW_RX_FRAME_SIZE);
7730 /* Push the ieee80211_rx_stats before the 802.11 frame */
7731 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7733 skb->dev = priv->ieee->dev;
7735 /* Point raw at the ieee80211_stats */
7736 skb->mac.raw = skb->data;
7738 skb->pkt_type = PACKET_OTHERHOST;
7739 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7740 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7741 netif_rx(skb);
7742 rxb->skb = NULL;
7747 * Main entry function for receiving a packet with 802.11 headers. This
7748 * should be called whenever the FW has notified us that there is a new
7749 * skb in the receive queue.
7751 static void ipw_rx(struct ipw_priv *priv)
7753 struct ipw_rx_mem_buffer *rxb;
7754 struct ipw_rx_packet *pkt;
7755 struct ieee80211_hdr_4addr *header;
7756 u32 r, w, i;
7757 u8 network_packet;
7759 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7760 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7761 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
7763 while (i != r) {
7764 rxb = priv->rxq->queue[i];
7765 if (unlikely(rxb == NULL)) {
7766 printk(KERN_CRIT "Queue not allocated!\n");
7767 break;
7769 priv->rxq->queue[i] = NULL;
7771 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7772 IPW_RX_BUF_SIZE,
7773 PCI_DMA_FROMDEVICE);
7775 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7776 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7777 pkt->header.message_type,
7778 pkt->header.rx_seq_num, pkt->header.control_bits);
7780 switch (pkt->header.message_type) {
7781 case RX_FRAME_TYPE: /* 802.11 frame */ {
7782 struct ieee80211_rx_stats stats = {
7783 .rssi =
7784 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7785 IPW_RSSI_TO_DBM,
7786 .signal =
7787 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7788 IPW_RSSI_TO_DBM + 0x100,
7789 .noise =
7790 le16_to_cpu(pkt->u.frame.noise),
7791 .rate = pkt->u.frame.rate,
7792 .mac_time = jiffies,
7793 .received_channel =
7794 pkt->u.frame.received_channel,
7795 .freq =
7796 (pkt->u.frame.
7797 control & (1 << 0)) ?
7798 IEEE80211_24GHZ_BAND :
7799 IEEE80211_52GHZ_BAND,
7800 .len = le16_to_cpu(pkt->u.frame.length),
7803 if (stats.rssi != 0)
7804 stats.mask |= IEEE80211_STATMASK_RSSI;
7805 if (stats.signal != 0)
7806 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7807 if (stats.noise != 0)
7808 stats.mask |= IEEE80211_STATMASK_NOISE;
7809 if (stats.rate != 0)
7810 stats.mask |= IEEE80211_STATMASK_RATE;
7812 priv->rx_packets++;
7814 #ifdef CONFIG_IPW2200_MONITOR
7815 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7816 #ifdef CONFIG_IEEE80211_RADIOTAP
7817 ipw_handle_data_packet_monitor(priv,
7818 rxb,
7819 &stats);
7820 #else
7821 ipw_handle_data_packet(priv, rxb,
7822 &stats);
7823 #endif
7824 break;
7826 #endif
7828 header =
7829 (struct ieee80211_hdr_4addr *)(rxb->skb->
7830 data +
7831 IPW_RX_FRAME_SIZE);
7832 /* TODO: Check Ad-Hoc dest/source and make sure
7833 * that we are actually parsing these packets
7834 * correctly -- we should probably use the
7835 * frame control of the packet and disregard
7836 * the current iw_mode */
7838 network_packet =
7839 is_network_packet(priv, header);
7840 if (network_packet && priv->assoc_network) {
7841 priv->assoc_network->stats.rssi =
7842 stats.rssi;
7843 average_add(&priv->average_rssi,
7844 stats.rssi);
7845 priv->last_rx_rssi = stats.rssi;
7848 IPW_DEBUG_RX("Frame: len=%u\n",
7849 le16_to_cpu(pkt->u.frame.length));
7851 if (le16_to_cpu(pkt->u.frame.length) <
7852 ieee80211_get_hdrlen(le16_to_cpu(
7853 header->frame_ctl))) {
7854 IPW_DEBUG_DROP
7855 ("Received packet is too small. "
7856 "Dropping.\n");
7857 priv->ieee->stats.rx_errors++;
7858 priv->wstats.discard.misc++;
7859 break;
7862 switch (WLAN_FC_GET_TYPE
7863 (le16_to_cpu(header->frame_ctl))) {
7865 case IEEE80211_FTYPE_MGMT:
7866 ipw_handle_mgmt_packet(priv, rxb,
7867 &stats);
7868 break;
7870 case IEEE80211_FTYPE_CTL:
7871 break;
7873 case IEEE80211_FTYPE_DATA:
7874 if (unlikely(!network_packet ||
7875 is_duplicate_packet(priv,
7876 header)))
7878 IPW_DEBUG_DROP("Dropping: "
7879 MAC_FMT ", "
7880 MAC_FMT ", "
7881 MAC_FMT "\n",
7882 MAC_ARG(header->
7883 addr1),
7884 MAC_ARG(header->
7885 addr2),
7886 MAC_ARG(header->
7887 addr3));
7888 break;
7891 ipw_handle_data_packet(priv, rxb,
7892 &stats);
7894 break;
7896 break;
7899 case RX_HOST_NOTIFICATION_TYPE:{
7900 IPW_DEBUG_RX
7901 ("Notification: subtype=%02X flags=%02X size=%d\n",
7902 pkt->u.notification.subtype,
7903 pkt->u.notification.flags,
7904 pkt->u.notification.size);
7905 ipw_rx_notification(priv, &pkt->u.notification);
7906 break;
7909 default:
7910 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
7911 pkt->header.message_type);
7912 break;
7915 /* For now we just don't re-use anything. We can tweak this
7916 * later to try and re-use notification packets and SKBs that
7917 * fail to Rx correctly */
7918 if (rxb->skb != NULL) {
7919 dev_kfree_skb_any(rxb->skb);
7920 rxb->skb = NULL;
7923 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
7924 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
7925 list_add_tail(&rxb->list, &priv->rxq->rx_used);
7927 i = (i + 1) % RX_QUEUE_SIZE;
7930 /* Backtrack one entry */
7931 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
7933 ipw_rx_queue_restock(priv);
7936 #define DEFAULT_RTS_THRESHOLD 2304U
7937 #define MIN_RTS_THRESHOLD 1U
7938 #define MAX_RTS_THRESHOLD 2304U
7939 #define DEFAULT_BEACON_INTERVAL 100U
7940 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7941 #define DEFAULT_LONG_RETRY_LIMIT 4U
7943 /**
7944 * ipw_sw_reset
7945 * @option: options to control different reset behaviour
7946 *        0 = reset everything except the 'disable' module_param
7947 *        1 = reset everything and print out driver info (for probe only)
7948 *        2 = reset everything
7949 */
7950 static int ipw_sw_reset(struct ipw_priv *priv, int option)
7952 int band, modulation;
7953 int old_mode = priv->ieee->iw_mode;
7955 /* Initialize module parameter values here */
7956 priv->config = 0;
7958 /* We default to disabling the LED code as right now it causes
7959 * too many systems to lock up... */
7960 if (!led)
7961 priv->config |= CFG_NO_LED;
7963 if (associate)
7964 priv->config |= CFG_ASSOCIATE;
7965 else
7966 IPW_DEBUG_INFO("Auto associate disabled.\n");
7968 if (auto_create)
7969 priv->config |= CFG_ADHOC_CREATE;
7970 else
7971 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7973 priv->config &= ~CFG_STATIC_ESSID;
7974 priv->essid_len = 0;
7975 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7977 if (disable && option) {
7978 priv->status |= STATUS_RF_KILL_SW;
7979 IPW_DEBUG_INFO("Radio disabled.\n");
7982 if (channel != 0) {
7983 priv->config |= CFG_STATIC_CHANNEL;
7984 priv->channel = channel;
7985 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7986 /* TODO: Validate that provided channel is in range */
7988 #ifdef CONFIG_IPW_QOS
7989 ipw_qos_init(priv, qos_enable, qos_burst_enable,
7990 burst_duration_CCK, burst_duration_OFDM);
7991 #endif /* CONFIG_IPW_QOS */
7993 switch (mode) {
7994 case 1:
7995 priv->ieee->iw_mode = IW_MODE_ADHOC;
7996 priv->net_dev->type = ARPHRD_ETHER;
7998 break;
7999 #ifdef CONFIG_IPW2200_MONITOR
8000 case 2:
8001 priv->ieee->iw_mode = IW_MODE_MONITOR;
8002 #ifdef CONFIG_IEEE80211_RADIOTAP
8003 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8004 #else
8005 priv->net_dev->type = ARPHRD_IEEE80211;
8006 #endif
8007 break;
8008 #endif
8009 default:
8010 case 0:
8011 priv->net_dev->type = ARPHRD_ETHER;
8012 priv->ieee->iw_mode = IW_MODE_INFRA;
8013 break;
8016 if (hwcrypto) {
8017 priv->ieee->host_encrypt = 0;
8018 priv->ieee->host_encrypt_msdu = 0;
8019 priv->ieee->host_decrypt = 0;
8020 priv->ieee->host_mc_decrypt = 0;
8022 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8024 /* IPW2200/2915 is able to do hardware fragmentation. */
8025 priv->ieee->host_open_frag = 0;
8027 if ((priv->pci_dev->device == 0x4223) ||
8028 (priv->pci_dev->device == 0x4224)) {
8029 if (option == 1)
8030 printk(KERN_INFO DRV_NAME
8031 ": Detected Intel PRO/Wireless 2915ABG Network "
8032 "Connection\n");
8033 priv->ieee->abg_true = 1;
8034 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8035 modulation = IEEE80211_OFDM_MODULATION |
8036 IEEE80211_CCK_MODULATION;
8037 priv->adapter = IPW_2915ABG;
8038 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8039 } else {
8040 if (option == 1)
8041 printk(KERN_INFO DRV_NAME
8042 ": Detected Intel PRO/Wireless 2200BG Network "
8043 "Connection\n");
8045 priv->ieee->abg_true = 0;
8046 band = IEEE80211_24GHZ_BAND;
8047 modulation = IEEE80211_OFDM_MODULATION |
8048 IEEE80211_CCK_MODULATION;
8049 priv->adapter = IPW_2200BG;
8050 priv->ieee->mode = IEEE_G | IEEE_B;
8053 priv->ieee->freq_band = band;
8054 priv->ieee->modulation = modulation;
8056 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8058 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8059 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8061 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8062 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8063 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8065 /* If power management is turned on, default to AC mode */
8066 priv->power_mode = IPW_POWER_AC;
8067 priv->tx_power = IPW_TX_POWER_DEFAULT;
8069 return old_mode == priv->ieee->iw_mode;
8070 }
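/*
 * Illustrative usage sketch (not part of the driver): how the 'option'
 * argument documented above is exercised elsewhere in this file.  The
 * probe path would use option 1 so the adapter banner is printed once,
 * ipw_wx_set_mode() below uses option 0 so the 'disable' module
 * parameter is left alone, and the sw_reset private ioctl uses option 2:
 *
 *	ipw_sw_reset(priv, 1);	// probe: full reset + "Detected ..." banner
 *	ipw_sw_reset(priv, 0);	// iwconfig mode change: keep rf-kill setting
 *	ipw_sw_reset(priv, 2);	// iwpriv sw_reset: reset everything
 *
 * The return value only says whether priv->ieee->iw_mode survived the
 * reset unchanged.
 */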
8072 /*
8073 * This file defines the Wireless Extension handlers. It does not
8074 * define any methods of hardware manipulation and relies on the
8075 * functions defined in ipw_main to provide the HW interaction.
8077 * The exception to this is the use of the ipw_get_ordinal()
8078 * function used to poll the hardware vs. making unnecessary calls.
8079 */
8082 static int ipw_wx_get_name(struct net_device *dev,
8083 struct iw_request_info *info,
8084 union iwreq_data *wrqu, char *extra)
8086 struct ipw_priv *priv = ieee80211_priv(dev);
8087 mutex_lock(&priv->mutex);
8088 if (priv->status & STATUS_RF_KILL_MASK)
8089 strcpy(wrqu->name, "radio off");
8090 else if (!(priv->status & STATUS_ASSOCIATED))
8091 strcpy(wrqu->name, "unassociated");
8092 else
8093 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8094 ipw_modes[priv->assoc_request.ieee_mode]);
8095 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8096 mutex_unlock(&priv->mutex);
8097 return 0;
8100 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8102 if (channel == 0) {
8103 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8104 priv->config &= ~CFG_STATIC_CHANNEL;
8105 IPW_DEBUG_ASSOC("Attempting to associate with new "
8106 "parameters.\n");
8107 ipw_associate(priv);
8108 return 0;
8111 priv->config |= CFG_STATIC_CHANNEL;
8113 if (priv->channel == channel) {
8114 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8115 channel);
8116 return 0;
8119 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8120 priv->channel = channel;
8122 #ifdef CONFIG_IPW2200_MONITOR
8123 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8124 int i;
8125 if (priv->status & STATUS_SCANNING) {
8126 IPW_DEBUG_SCAN("Scan abort triggered due to "
8127 "channel change.\n");
8128 ipw_abort_scan(priv);
8131 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8132 udelay(10);
8134 if (priv->status & STATUS_SCANNING)
8135 IPW_DEBUG_SCAN("Still scanning...\n");
8136 else
8137 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8138 1000 - i);
8140 return 0;
8142 #endif /* CONFIG_IPW2200_MONITOR */
8144 /* Network configuration changed -- force [re]association */
8145 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8146 if (!ipw_disassociate(priv))
8147 ipw_associate(priv);
8149 return 0;
8152 static int ipw_wx_set_freq(struct net_device *dev,
8153 struct iw_request_info *info,
8154 union iwreq_data *wrqu, char *extra)
8156 struct ipw_priv *priv = ieee80211_priv(dev);
8157 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8158 struct iw_freq *fwrq = &wrqu->freq;
8159 int ret = 0, i;
8160 u8 channel, flags;
8161 int band;
8163 if (fwrq->m == 0) {
8164 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8165 mutex_lock(&priv->mutex);
8166 ret = ipw_set_channel(priv, 0);
8167 mutex_unlock(&priv->mutex);
8168 return ret;
8170 /* if setting by freq convert to channel */
8171 if (fwrq->e == 1) {
8172 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8173 if (channel == 0)
8174 return -EINVAL;
8175 } else
8176 channel = fwrq->m;
8178 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8179 return -EINVAL;
8181 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8182 i = ieee80211_channel_to_index(priv->ieee, channel);
8183 if (i == -1)
8184 return -EINVAL;
8186 flags = (band == IEEE80211_24GHZ_BAND) ?
8187 geo->bg[i].flags : geo->a[i].flags;
8188 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8189 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8190 return -EINVAL;
8194 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8195 mutex_lock(&priv->mutex);
8196 ret = ipw_set_channel(priv, channel);
8197 mutex_unlock(&priv->mutex);
8198 return ret;
8201 static int ipw_wx_get_freq(struct net_device *dev,
8202 struct iw_request_info *info,
8203 union iwreq_data *wrqu, char *extra)
8205 struct ipw_priv *priv = ieee80211_priv(dev);
8207 wrqu->freq.e = 0;
8209 /* If we are associated, trying to associate, or have a statically
8210 * configured CHANNEL then return that; otherwise return ANY */
8211 mutex_lock(&priv->mutex);
8212 if (priv->config & CFG_STATIC_CHANNEL ||
8213 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8214 wrqu->freq.m = priv->channel;
8215 else
8216 wrqu->freq.m = 0;
8218 mutex_unlock(&priv->mutex);
8219 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8220 return 0;
8223 static int ipw_wx_set_mode(struct net_device *dev,
8224 struct iw_request_info *info,
8225 union iwreq_data *wrqu, char *extra)
8227 struct ipw_priv *priv = ieee80211_priv(dev);
8228 int err = 0;
8230 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8232 switch (wrqu->mode) {
8233 #ifdef CONFIG_IPW2200_MONITOR
8234 case IW_MODE_MONITOR:
8235 #endif
8236 case IW_MODE_ADHOC:
8237 case IW_MODE_INFRA:
8238 break;
8239 case IW_MODE_AUTO:
8240 wrqu->mode = IW_MODE_INFRA;
8241 break;
8242 default:
8243 return -EINVAL;
8245 if (wrqu->mode == priv->ieee->iw_mode)
8246 return 0;
8248 mutex_lock(&priv->mutex);
8250 ipw_sw_reset(priv, 0);
8252 #ifdef CONFIG_IPW2200_MONITOR
8253 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8254 priv->net_dev->type = ARPHRD_ETHER;
8256 if (wrqu->mode == IW_MODE_MONITOR)
8257 #ifdef CONFIG_IEEE80211_RADIOTAP
8258 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8259 #else
8260 priv->net_dev->type = ARPHRD_IEEE80211;
8261 #endif
8262 #endif /* CONFIG_IPW2200_MONITOR */
8264 /* Free the existing firmware and reset the fw_loaded
8265 * flag so ipw_load() will bring in the new firmware */
8266 free_firmware();
8268 priv->ieee->iw_mode = wrqu->mode;
8270 queue_work(priv->workqueue, &priv->adapter_restart);
8271 mutex_unlock(&priv->mutex);
8272 return err;
8275 static int ipw_wx_get_mode(struct net_device *dev,
8276 struct iw_request_info *info,
8277 union iwreq_data *wrqu, char *extra)
8279 struct ipw_priv *priv = ieee80211_priv(dev);
8280 mutex_lock(&priv->mutex);
8281 wrqu->mode = priv->ieee->iw_mode;
8282 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8283 mutex_unlock(&priv->mutex);
8284 return 0;
8287 /* Values are in microseconds */
8288 static const s32 timeout_duration[] = {
8289 350000,
8290 250000,
8291 75000,
8292 37000,
8293 25000,
8294 };
8296 static const s32 period_duration[] = {
8297 400000,
8298 700000,
8299 1000000,
8300 1000000,
8301 1000000
8302 };
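/*
 * Illustrative note (a sketch of how these tables are read; see
 * ipw_wx_get_powermode() further below): a numeric power-save level N
 * in the 1..5 range indexes both arrays as [N - 1] and the values are
 * reported in milliseconds.  The variable names here are hypothetical:
 *
 *	level      = IPW_POWER_LEVEL(priv->power_mode);   // e.g. 3
 *	timeout_ms = timeout_duration[level - 1] / 1000;  // 75000 / 1000 = 75
 *	period_ms  = period_duration[level - 1] / 1000;   // 1000000 / 1000 = 1000
 */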
8304 static int ipw_wx_get_range(struct net_device *dev,
8305 struct iw_request_info *info,
8306 union iwreq_data *wrqu, char *extra)
8308 struct ipw_priv *priv = ieee80211_priv(dev);
8309 struct iw_range *range = (struct iw_range *)extra;
8310 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8311 int i = 0, j;
8313 wrqu->data.length = sizeof(*range);
8314 memset(range, 0, sizeof(*range));
8316 /* 54 Mbit/s == ~27 Mbit/s of real throughput (802.11g) */
8317 range->throughput = 27 * 1000 * 1000;
8319 range->max_qual.qual = 100;
8320 /* TODO: Find real max RSSI and stick here */
8321 range->max_qual.level = 0;
8322 range->max_qual.noise = 0;
8323 range->max_qual.updated = 7; /* Updated all three */
8325 range->avg_qual.qual = 70;
8326 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8327 range->avg_qual.level = 0; /* FIXME to real average level */
8328 range->avg_qual.noise = 0;
8329 range->avg_qual.updated = 7; /* Updated all three */
8330 mutex_lock(&priv->mutex);
8331 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8333 for (i = 0; i < range->num_bitrates; i++)
8334 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8335 500000;
8337 range->max_rts = DEFAULT_RTS_THRESHOLD;
8338 range->min_frag = MIN_FRAG_THRESHOLD;
8339 range->max_frag = MAX_FRAG_THRESHOLD;
8341 range->encoding_size[0] = 5;
8342 range->encoding_size[1] = 13;
8343 range->num_encoding_sizes = 2;
8344 range->max_encoding_tokens = WEP_KEYS;
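/*
 * Background note (standard WEP sizing, not driver-specific): the two
 * encoding sizes advertised above are raw WEP key lengths in bytes;
 * user space usually names them after the key-plus-IV width:
 *
 *	5 bytes  = 40 secret bits  + 24-bit IV = "64-bit WEP"
 *	13 bytes = 104 secret bits + 24-bit IV = "128-bit WEP"
 */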
8346 /* Set the Wireless Extension versions */
8347 range->we_version_compiled = WIRELESS_EXT;
8348 range->we_version_source = 18;
8350 i = 0;
8351 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8352 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8353 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8354 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8355 continue;
8357 range->freq[i].i = geo->bg[j].channel;
8358 range->freq[i].m = geo->bg[j].freq * 100000;
8359 range->freq[i].e = 1;
8360 i++;
8364 if (priv->ieee->mode & IEEE_A) {
8365 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8366 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8367 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8368 continue;
8370 range->freq[i].i = geo->a[j].channel;
8371 range->freq[i].m = geo->a[j].freq * 100000;
8372 range->freq[i].e = 1;
8373 i++;
8377 range->num_channels = i;
8378 range->num_frequency = i;
8380 mutex_unlock(&priv->mutex);
8382 /* Event capability (kernel + driver) */
8383 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8384 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8385 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8386 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8388 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8389 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8391 IPW_DEBUG_WX("GET Range\n");
8392 return 0;
8395 static int ipw_wx_set_wap(struct net_device *dev,
8396 struct iw_request_info *info,
8397 union iwreq_data *wrqu, char *extra)
8399 struct ipw_priv *priv = ieee80211_priv(dev);
8401 static const unsigned char any[] = {
8402 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8404 static const unsigned char off[] = {
8405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8408 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8409 return -EINVAL;
8410 mutex_lock(&priv->mutex);
8411 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8412 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8413 /* we disable mandatory BSSID association */
8414 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8415 priv->config &= ~CFG_STATIC_BSSID;
8416 IPW_DEBUG_ASSOC("Attempting to associate with new "
8417 "parameters.\n");
8418 ipw_associate(priv);
8419 mutex_unlock(&priv->mutex);
8420 return 0;
8423 priv->config |= CFG_STATIC_BSSID;
8424 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8425 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8426 mutex_unlock(&priv->mutex);
8427 return 0;
8430 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8431 MAC_ARG(wrqu->ap_addr.sa_data));
8433 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8435 /* Network configuration changed -- force [re]association */
8436 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8437 if (!ipw_disassociate(priv))
8438 ipw_associate(priv);
8440 mutex_unlock(&priv->mutex);
8441 return 0;
8444 static int ipw_wx_get_wap(struct net_device *dev,
8445 struct iw_request_info *info,
8446 union iwreq_data *wrqu, char *extra)
8448 struct ipw_priv *priv = ieee80211_priv(dev);
8449 /* If we are associated, trying to associate, or have a statically
8450 * configured BSSID then return that; otherwise return ANY */
8451 mutex_lock(&priv->mutex);
8452 if (priv->config & CFG_STATIC_BSSID ||
8453 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8454 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8455 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8456 } else
8457 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8459 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8460 MAC_ARG(wrqu->ap_addr.sa_data));
8461 mutex_unlock(&priv->mutex);
8462 return 0;
8465 static int ipw_wx_set_essid(struct net_device *dev,
8466 struct iw_request_info *info,
8467 union iwreq_data *wrqu, char *extra)
8469 struct ipw_priv *priv = ieee80211_priv(dev);
8470 char *essid = ""; /* ANY */
8471 int length = 0;
8472 mutex_lock(&priv->mutex);
8473 if (wrqu->essid.flags && wrqu->essid.length) {
8474 length = wrqu->essid.length - 1;
8475 essid = extra;
8477 if (length == 0) {
8478 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8479 if ((priv->config & CFG_STATIC_ESSID) &&
8480 !(priv->status & (STATUS_ASSOCIATED |
8481 STATUS_ASSOCIATING))) {
8482 IPW_DEBUG_ASSOC("Attempting to associate with new "
8483 "parameters.\n");
8484 priv->config &= ~CFG_STATIC_ESSID;
8485 ipw_associate(priv);
8487 mutex_unlock(&priv->mutex);
8488 return 0;
8491 length = min(length, IW_ESSID_MAX_SIZE);
8493 priv->config |= CFG_STATIC_ESSID;
8495 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8496 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8497 mutex_unlock(&priv->mutex);
8498 return 0;
8501 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8502 length);
8504 priv->essid_len = length;
8505 memcpy(priv->essid, essid, priv->essid_len);
8507 /* Network configuration changed -- force [re]association */
8508 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8509 if (!ipw_disassociate(priv))
8510 ipw_associate(priv);
8512 mutex_unlock(&priv->mutex);
8513 return 0;
8516 static int ipw_wx_get_essid(struct net_device *dev,
8517 struct iw_request_info *info,
8518 union iwreq_data *wrqu, char *extra)
8520 struct ipw_priv *priv = ieee80211_priv(dev);
8522 /* If we are associated, trying to associate, or have a statically
8523 * configured ESSID then return that; otherwise return ANY */
8524 mutex_lock(&priv->mutex);
8525 if (priv->config & CFG_STATIC_ESSID ||
8526 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8527 IPW_DEBUG_WX("Getting essid: '%s'\n",
8528 escape_essid(priv->essid, priv->essid_len));
8529 memcpy(extra, priv->essid, priv->essid_len);
8530 wrqu->essid.length = priv->essid_len;
8531 wrqu->essid.flags = 1; /* active */
8532 } else {
8533 IPW_DEBUG_WX("Getting essid: ANY\n");
8534 wrqu->essid.length = 0;
8535 wrqu->essid.flags = 0; /* active */
8537 mutex_unlock(&priv->mutex);
8538 return 0;
8541 static int ipw_wx_set_nick(struct net_device *dev,
8542 struct iw_request_info *info,
8543 union iwreq_data *wrqu, char *extra)
8545 struct ipw_priv *priv = ieee80211_priv(dev);
8547 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8548 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8549 return -E2BIG;
8550 mutex_lock(&priv->mutex);
8551 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8552 memset(priv->nick, 0, sizeof(priv->nick));
8553 memcpy(priv->nick, extra, wrqu->data.length);
8554 IPW_DEBUG_TRACE("<<\n");
8555 mutex_unlock(&priv->mutex);
8556 return 0;
8560 static int ipw_wx_get_nick(struct net_device *dev,
8561 struct iw_request_info *info,
8562 union iwreq_data *wrqu, char *extra)
8564 struct ipw_priv *priv = ieee80211_priv(dev);
8565 IPW_DEBUG_WX("Getting nick\n");
8566 mutex_lock(&priv->mutex);
8567 wrqu->data.length = strlen(priv->nick) + 1;
8568 memcpy(extra, priv->nick, wrqu->data.length);
8569 wrqu->data.flags = 1; /* active */
8570 mutex_unlock(&priv->mutex);
8571 return 0;
8574 static int ipw_wx_set_sens(struct net_device *dev,
8575 struct iw_request_info *info,
8576 union iwreq_data *wrqu, char *extra)
8578 struct ipw_priv *priv = ieee80211_priv(dev);
8579 int err = 0;
8581 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8582 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8583 mutex_lock(&priv->mutex);
8585 if (wrqu->sens.fixed == 0)
8586 {
8587 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8588 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8589 goto out;
8590 }
8591 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8592 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8593 err = -EINVAL;
8594 goto out;
8597 priv->roaming_threshold = wrqu->sens.value;
8598 priv->disassociate_threshold = 3*wrqu->sens.value;
8599 out:
8600 mutex_unlock(&priv->mutex);
8601 return err;
8604 static int ipw_wx_get_sens(struct net_device *dev,
8605 struct iw_request_info *info,
8606 union iwreq_data *wrqu, char *extra)
8608 struct ipw_priv *priv = ieee80211_priv(dev);
8609 mutex_lock(&priv->mutex);
8610 wrqu->sens.fixed = 1;
8611 wrqu->sens.value = priv->roaming_threshold;
8612 mutex_unlock(&priv->mutex);
8614 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
8615 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8617 return 0;
8620 static int ipw_wx_set_rate(struct net_device *dev,
8621 struct iw_request_info *info,
8622 union iwreq_data *wrqu, char *extra)
8624 /* TODO: We should use semaphores or locks for access to priv */
8625 struct ipw_priv *priv = ieee80211_priv(dev);
8626 u32 target_rate = wrqu->bitrate.value;
8627 u32 fixed, mask;
8629 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8630 /* value = X, fixed = 1 means only rate X */
8631 /* value = X, fixed = 0 means all rates lower equal X */
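/*
 * Worked example (illustrative only, 'eth1' is hypothetical): a request
 * such as "iwconfig eth1 rate 11M" typically arrives here as
 * value = 11000000 with fixed = 1, so only IEEE80211_CCK_RATE_11MB_MASK
 * is set before the jump to apply.  With fixed = 0 the walk below also
 * OR's in every rate up to 11M:
 *
 *	mask = IEEE80211_CCK_RATE_1MB_MASK | IEEE80211_CCK_RATE_2MB_MASK |
 *	       IEEE80211_CCK_RATE_5MB_MASK | IEEE80211_OFDM_RATE_6MB_MASK |
 *	       IEEE80211_OFDM_RATE_9MB_MASK | IEEE80211_CCK_RATE_11MB_MASK;
 */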
8633 if (target_rate == -1) {
8634 fixed = 0;
8635 mask = IEEE80211_DEFAULT_RATES_MASK;
8636 /* Now we should reassociate */
8637 goto apply;
8640 mask = 0;
8641 fixed = wrqu->bitrate.fixed;
8643 if (target_rate == 1000000 || !fixed)
8644 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8645 if (target_rate == 1000000)
8646 goto apply;
8648 if (target_rate == 2000000 || !fixed)
8649 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8650 if (target_rate == 2000000)
8651 goto apply;
8653 if (target_rate == 5500000 || !fixed)
8654 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8655 if (target_rate == 5500000)
8656 goto apply;
8658 if (target_rate == 6000000 || !fixed)
8659 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8660 if (target_rate == 6000000)
8661 goto apply;
8663 if (target_rate == 9000000 || !fixed)
8664 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8665 if (target_rate == 9000000)
8666 goto apply;
8668 if (target_rate == 11000000 || !fixed)
8669 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8670 if (target_rate == 11000000)
8671 goto apply;
8673 if (target_rate == 12000000 || !fixed)
8674 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8675 if (target_rate == 12000000)
8676 goto apply;
8678 if (target_rate == 18000000 || !fixed)
8679 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8680 if (target_rate == 18000000)
8681 goto apply;
8683 if (target_rate == 24000000 || !fixed)
8684 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8685 if (target_rate == 24000000)
8686 goto apply;
8688 if (target_rate == 36000000 || !fixed)
8689 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8690 if (target_rate == 36000000)
8691 goto apply;
8693 if (target_rate == 48000000 || !fixed)
8694 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8695 if (target_rate == 48000000)
8696 goto apply;
8698 if (target_rate == 54000000 || !fixed)
8699 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8700 if (target_rate == 54000000)
8701 goto apply;
8703 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8704 return -EINVAL;
8706 apply:
8707 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8708 mask, fixed ? "fixed" : "sub-rates");
8709 mutex_lock(&priv->mutex);
8710 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8711 priv->config &= ~CFG_FIXED_RATE;
8712 ipw_set_fixed_rate(priv, priv->ieee->mode);
8713 } else
8714 priv->config |= CFG_FIXED_RATE;
8716 if (priv->rates_mask == mask) {
8717 IPW_DEBUG_WX("Mask set to current mask.\n");
8718 mutex_unlock(&priv->mutex);
8719 return 0;
8722 priv->rates_mask = mask;
8724 /* Network configuration changed -- force [re]association */
8725 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8726 if (!ipw_disassociate(priv))
8727 ipw_associate(priv);
8729 mutex_unlock(&priv->mutex);
8730 return 0;
8733 static int ipw_wx_get_rate(struct net_device *dev,
8734 struct iw_request_info *info,
8735 union iwreq_data *wrqu, char *extra)
8737 struct ipw_priv *priv = ieee80211_priv(dev);
8738 mutex_lock(&priv->mutex);
8739 wrqu->bitrate.value = priv->last_rate;
8740 mutex_unlock(&priv->mutex);
8741 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8742 return 0;
8745 static int ipw_wx_set_rts(struct net_device *dev,
8746 struct iw_request_info *info,
8747 union iwreq_data *wrqu, char *extra)
8749 struct ipw_priv *priv = ieee80211_priv(dev);
8750 mutex_lock(&priv->mutex);
8751 if (wrqu->rts.disabled)
8752 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8753 else {
8754 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8755 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8756 mutex_unlock(&priv->mutex);
8757 return -EINVAL;
8759 priv->rts_threshold = wrqu->rts.value;
8762 ipw_send_rts_threshold(priv, priv->rts_threshold);
8763 mutex_unlock(&priv->mutex);
8764 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8765 return 0;
8768 static int ipw_wx_get_rts(struct net_device *dev,
8769 struct iw_request_info *info,
8770 union iwreq_data *wrqu, char *extra)
8772 struct ipw_priv *priv = ieee80211_priv(dev);
8773 mutex_lock(&priv->mutex);
8774 wrqu->rts.value = priv->rts_threshold;
8775 wrqu->rts.fixed = 0; /* no auto select */
8776 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8777 mutex_unlock(&priv->mutex);
8778 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8779 return 0;
8782 static int ipw_wx_set_txpow(struct net_device *dev,
8783 struct iw_request_info *info,
8784 union iwreq_data *wrqu, char *extra)
8786 struct ipw_priv *priv = ieee80211_priv(dev);
8787 int err = 0;
8789 mutex_lock(&priv->mutex);
8790 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8791 err = -EINPROGRESS;
8792 goto out;
8795 if (!wrqu->power.fixed)
8796 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8798 if (wrqu->power.flags != IW_TXPOW_DBM) {
8799 err = -EINVAL;
8800 goto out;
8803 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8804 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8805 err = -EINVAL;
8806 goto out;
8809 priv->tx_power = wrqu->power.value;
8810 err = ipw_set_tx_power(priv);
8811 out:
8812 mutex_unlock(&priv->mutex);
8813 return err;
8816 static int ipw_wx_get_txpow(struct net_device *dev,
8817 struct iw_request_info *info,
8818 union iwreq_data *wrqu, char *extra)
8820 struct ipw_priv *priv = ieee80211_priv(dev);
8821 mutex_lock(&priv->mutex);
8822 wrqu->power.value = priv->tx_power;
8823 wrqu->power.fixed = 1;
8824 wrqu->power.flags = IW_TXPOW_DBM;
8825 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8826 mutex_unlock(&priv->mutex);
8828 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8829 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8831 return 0;
8834 static int ipw_wx_set_frag(struct net_device *dev,
8835 struct iw_request_info *info,
8836 union iwreq_data *wrqu, char *extra)
8838 struct ipw_priv *priv = ieee80211_priv(dev);
8839 mutex_lock(&priv->mutex);
8840 if (wrqu->frag.disabled)
8841 priv->ieee->fts = DEFAULT_FTS;
8842 else {
8843 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8844 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8845 mutex_unlock(&priv->mutex);
8846 return -EINVAL;
8849 priv->ieee->fts = wrqu->frag.value & ~0x1;
8852 ipw_send_frag_threshold(priv, wrqu->frag.value);
8853 mutex_unlock(&priv->mutex);
8854 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8855 return 0;
8858 static int ipw_wx_get_frag(struct net_device *dev,
8859 struct iw_request_info *info,
8860 union iwreq_data *wrqu, char *extra)
8862 struct ipw_priv *priv = ieee80211_priv(dev);
8863 mutex_lock(&priv->mutex);
8864 wrqu->frag.value = priv->ieee->fts;
8865 wrqu->frag.fixed = 0; /* no auto select */
8866 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8867 mutex_unlock(&priv->mutex);
8868 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8870 return 0;
8873 static int ipw_wx_set_retry(struct net_device *dev,
8874 struct iw_request_info *info,
8875 union iwreq_data *wrqu, char *extra)
8877 struct ipw_priv *priv = ieee80211_priv(dev);
8879 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8880 return -EINVAL;
8882 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8883 return 0;
8885 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8886 return -EINVAL;
8888 mutex_lock(&priv->mutex);
8889 if (wrqu->retry.flags & IW_RETRY_MIN)
8890 priv->short_retry_limit = (u8) wrqu->retry.value;
8891 else if (wrqu->retry.flags & IW_RETRY_MAX)
8892 priv->long_retry_limit = (u8) wrqu->retry.value;
8893 else {
8894 priv->short_retry_limit = (u8) wrqu->retry.value;
8895 priv->long_retry_limit = (u8) wrqu->retry.value;
8898 ipw_send_retry_limit(priv, priv->short_retry_limit,
8899 priv->long_retry_limit);
8900 mutex_unlock(&priv->mutex);
8901 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8902 priv->short_retry_limit, priv->long_retry_limit);
8903 return 0;
8906 static int ipw_wx_get_retry(struct net_device *dev,
8907 struct iw_request_info *info,
8908 union iwreq_data *wrqu, char *extra)
8910 struct ipw_priv *priv = ieee80211_priv(dev);
8912 mutex_lock(&priv->mutex);
8913 wrqu->retry.disabled = 0;
8915 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8916 mutex_unlock(&priv->mutex);
8917 return -EINVAL;
8920 if (wrqu->retry.flags & IW_RETRY_MAX) {
8921 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8922 wrqu->retry.value = priv->long_retry_limit;
8923 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8924 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8925 wrqu->retry.value = priv->short_retry_limit;
8926 } else {
8927 wrqu->retry.flags = IW_RETRY_LIMIT;
8928 wrqu->retry.value = priv->short_retry_limit;
8930 mutex_unlock(&priv->mutex);
8932 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8934 return 0;
8937 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8938 int essid_len)
8940 struct ipw_scan_request_ext scan;
8941 int err = 0, scan_type;
8943 if (!(priv->status & STATUS_INIT) ||
8944 (priv->status & STATUS_EXIT_PENDING))
8945 return 0;
8947 mutex_lock(&priv->mutex);
8949 if (priv->status & STATUS_RF_KILL_MASK) {
8950 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
8951 priv->status |= STATUS_SCAN_PENDING;
8952 goto done;
8955 IPW_DEBUG_HC("starting request direct scan!\n");
8957 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8958 /* We should not sleep here; otherwise we will block most
8959 * of the system (for instance, we hold rtnl_lock when we
8960 * get here).
8961 */
8962 err = -EAGAIN;
8963 goto done;
8965 memset(&scan, 0, sizeof(scan));
8967 if (priv->config & CFG_SPEED_SCAN)
8968 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8969 cpu_to_le16(30);
8970 else
8971 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8972 cpu_to_le16(20);
8974 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
8975 cpu_to_le16(20);
8976 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
8977 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
8979 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
8981 err = ipw_send_ssid(priv, essid, essid_len);
8982 if (err) {
8983 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
8984 goto done;
8986 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
8988 ipw_add_scan_channels(priv, &scan, scan_type);
8990 err = ipw_send_scan_request_ext(priv, &scan);
8991 if (err) {
8992 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
8993 goto done;
8996 priv->status |= STATUS_SCANNING;
8998 done:
8999 mutex_unlock(&priv->mutex);
9000 return err;
9003 static int ipw_wx_set_scan(struct net_device *dev,
9004 struct iw_request_info *info,
9005 union iwreq_data *wrqu, char *extra)
9007 struct ipw_priv *priv = ieee80211_priv(dev);
9008 struct iw_scan_req *req = NULL;
9009 if (wrqu->data.length
9010 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9011 req = (struct iw_scan_req *)extra;
9012 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9013 ipw_request_direct_scan(priv, req->essid,
9014 req->essid_len);
9015 return 0;
9019 IPW_DEBUG_WX("Start scan\n");
9021 queue_work(priv->workqueue, &priv->request_scan);
9023 return 0;
9026 static int ipw_wx_get_scan(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *extra)
9030 struct ipw_priv *priv = ieee80211_priv(dev);
9031 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9034 static int ipw_wx_set_encode(struct net_device *dev,
9035 struct iw_request_info *info,
9036 union iwreq_data *wrqu, char *key)
9038 struct ipw_priv *priv = ieee80211_priv(dev);
9039 int ret;
9040 u32 cap = priv->capability;
9042 mutex_lock(&priv->mutex);
9043 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9045 /* In IBSS mode, we need to notify the firmware to update
9046 * the beacon info after we changed the capability. */
9047 if (cap != priv->capability &&
9048 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9049 priv->status & STATUS_ASSOCIATED)
9050 ipw_disassociate(priv);
9052 mutex_unlock(&priv->mutex);
9053 return ret;
9056 static int ipw_wx_get_encode(struct net_device *dev,
9057 struct iw_request_info *info,
9058 union iwreq_data *wrqu, char *key)
9060 struct ipw_priv *priv = ieee80211_priv(dev);
9061 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9064 static int ipw_wx_set_power(struct net_device *dev,
9065 struct iw_request_info *info,
9066 union iwreq_data *wrqu, char *extra)
9068 struct ipw_priv *priv = ieee80211_priv(dev);
9069 int err;
9070 mutex_lock(&priv->mutex);
9071 if (wrqu->power.disabled) {
9072 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9073 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9074 if (err) {
9075 IPW_DEBUG_WX("failed setting power mode.\n");
9076 mutex_unlock(&priv->mutex);
9077 return err;
9079 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9080 mutex_unlock(&priv->mutex);
9081 return 0;
9084 switch (wrqu->power.flags & IW_POWER_MODE) {
9085 case IW_POWER_ON: /* If not specified */
9086 case IW_POWER_MODE: /* If set all mask */
9087 case IW_POWER_ALL_R: /* If explicitly stated all */
9088 break;
9089 default: /* Otherwise we don't support it */
9090 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9091 wrqu->power.flags);
9092 mutex_unlock(&priv->mutex);
9093 return -EOPNOTSUPP;
9096 /* If the user hasn't specified a power management mode yet, default
9097 * to BATTERY */
9098 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9099 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9100 else
9101 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9102 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9103 if (err) {
9104 IPW_DEBUG_WX("failed setting power mode.\n");
9105 mutex_unlock(&priv->mutex);
9106 return err;
9109 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9110 mutex_unlock(&priv->mutex);
9111 return 0;
9114 static int ipw_wx_get_power(struct net_device *dev,
9115 struct iw_request_info *info,
9116 union iwreq_data *wrqu, char *extra)
9118 struct ipw_priv *priv = ieee80211_priv(dev);
9119 mutex_lock(&priv->mutex);
9120 if (!(priv->power_mode & IPW_POWER_ENABLED))
9121 wrqu->power.disabled = 1;
9122 else
9123 wrqu->power.disabled = 0;
9125 mutex_unlock(&priv->mutex);
9126 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9128 return 0;
9131 static int ipw_wx_set_powermode(struct net_device *dev,
9132 struct iw_request_info *info,
9133 union iwreq_data *wrqu, char *extra)
9135 struct ipw_priv *priv = ieee80211_priv(dev);
9136 int mode = *(int *)extra;
9137 int err;
9138 mutex_lock(&priv->mutex);
9139 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9140 mode = IPW_POWER_AC;
9141 priv->power_mode = mode;
9142 } else {
9143 priv->power_mode = IPW_POWER_ENABLED | mode;
9146 if (priv->power_mode != mode) {
9147 err = ipw_send_power_mode(priv, mode);
9149 if (err) {
9150 IPW_DEBUG_WX("failed setting power mode.\n");
9151 mutex_unlock(&priv->mutex);
9152 return err;
9155 mutex_unlock(&priv->mutex);
9156 return 0;
9159 #define MAX_WX_STRING 80
9160 static int ipw_wx_get_powermode(struct net_device *dev,
9161 struct iw_request_info *info,
9162 union iwreq_data *wrqu, char *extra)
9164 struct ipw_priv *priv = ieee80211_priv(dev);
9165 int level = IPW_POWER_LEVEL(priv->power_mode);
9166 char *p = extra;
9168 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9170 switch (level) {
9171 case IPW_POWER_AC:
9172 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9173 break;
9174 case IPW_POWER_BATTERY:
9175 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9176 break;
9177 default:
9178 p += snprintf(p, MAX_WX_STRING - (p - extra),
9179 "(Timeout %dms, Period %dms)",
9180 timeout_duration[level - 1] / 1000,
9181 period_duration[level - 1] / 1000);
9184 if (!(priv->power_mode & IPW_POWER_ENABLED))
9185 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9187 wrqu->data.length = p - extra + 1;
9189 return 0;
9192 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9193 struct iw_request_info *info,
9194 union iwreq_data *wrqu, char *extra)
9196 struct ipw_priv *priv = ieee80211_priv(dev);
9197 int mode = *(int *)extra;
9198 u8 band = 0, modulation = 0;
9200 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9201 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9202 return -EINVAL;
9204 mutex_lock(&priv->mutex);
9205 if (priv->adapter == IPW_2915ABG) {
9206 priv->ieee->abg_true = 1;
9207 if (mode & IEEE_A) {
9208 band |= IEEE80211_52GHZ_BAND;
9209 modulation |= IEEE80211_OFDM_MODULATION;
9210 } else
9211 priv->ieee->abg_true = 0;
9212 } else {
9213 if (mode & IEEE_A) {
9214 IPW_WARNING("Attempt to set 2200BG into "
9215 "802.11a mode\n");
9216 mutex_unlock(&priv->mutex);
9217 return -EINVAL;
9220 priv->ieee->abg_true = 0;
9223 if (mode & IEEE_B) {
9224 band |= IEEE80211_24GHZ_BAND;
9225 modulation |= IEEE80211_CCK_MODULATION;
9226 } else
9227 priv->ieee->abg_true = 0;
9229 if (mode & IEEE_G) {
9230 band |= IEEE80211_24GHZ_BAND;
9231 modulation |= IEEE80211_OFDM_MODULATION;
9232 } else
9233 priv->ieee->abg_true = 0;
9235 priv->ieee->mode = mode;
9236 priv->ieee->freq_band = band;
9237 priv->ieee->modulation = modulation;
9238 init_supported_rates(priv, &priv->rates);
9240 /* Network configuration changed -- force [re]association */
9241 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9242 if (!ipw_disassociate(priv)) {
9243 ipw_send_supported_rates(priv, &priv->rates);
9244 ipw_associate(priv);
9247 /* Update the band LEDs */
9248 ipw_led_band_on(priv);
9250 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9251 mode & IEEE_A ? 'a' : '.',
9252 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9253 mutex_unlock(&priv->mutex);
9254 return 0;
9257 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9258 struct iw_request_info *info,
9259 union iwreq_data *wrqu, char *extra)
9261 struct ipw_priv *priv = ieee80211_priv(dev);
9262 mutex_lock(&priv->mutex);
9263 switch (priv->ieee->mode) {
9264 case IEEE_A:
9265 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9266 break;
9267 case IEEE_B:
9268 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9269 break;
9270 case IEEE_A | IEEE_B:
9271 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9272 break;
9273 case IEEE_G:
9274 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9275 break;
9276 case IEEE_A | IEEE_G:
9277 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9278 break;
9279 case IEEE_B | IEEE_G:
9280 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9281 break;
9282 case IEEE_A | IEEE_B | IEEE_G:
9283 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9284 break;
9285 default:
9286 strncpy(extra, "unknown", MAX_WX_STRING);
9287 break;
9290 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9292 wrqu->data.length = strlen(extra) + 1;
9293 mutex_unlock(&priv->mutex);
9295 return 0;
9298 static int ipw_wx_set_preamble(struct net_device *dev,
9299 struct iw_request_info *info,
9300 union iwreq_data *wrqu, char *extra)
9302 struct ipw_priv *priv = ieee80211_priv(dev);
9303 int mode = *(int *)extra;
9304 mutex_lock(&priv->mutex);
9305 /* Switching from SHORT -> LONG requires a disassociation */
9306 if (mode == 1) {
9307 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9308 priv->config |= CFG_PREAMBLE_LONG;
9310 /* Network configuration changed -- force [re]association */
9311 IPW_DEBUG_ASSOC
9312 ("[re]association triggered due to preamble change.\n");
9313 if (!ipw_disassociate(priv))
9314 ipw_associate(priv);
9316 goto done;
9319 if (mode == 0) {
9320 priv->config &= ~CFG_PREAMBLE_LONG;
9321 goto done;
9323 mutex_unlock(&priv->mutex);
9324 return -EINVAL;
9326 done:
9327 mutex_unlock(&priv->mutex);
9328 return 0;
9331 static int ipw_wx_get_preamble(struct net_device *dev,
9332 struct iw_request_info *info,
9333 union iwreq_data *wrqu, char *extra)
9335 struct ipw_priv *priv = ieee80211_priv(dev);
9336 mutex_lock(&priv->mutex);
9337 if (priv->config & CFG_PREAMBLE_LONG)
9338 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9339 else
9340 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9341 mutex_unlock(&priv->mutex);
9342 return 0;
9345 #ifdef CONFIG_IPW2200_MONITOR
9346 static int ipw_wx_set_monitor(struct net_device *dev,
9347 struct iw_request_info *info,
9348 union iwreq_data *wrqu, char *extra)
9350 struct ipw_priv *priv = ieee80211_priv(dev);
9351 int *parms = (int *)extra;
9352 int enable = (parms[0] > 0);
9353 mutex_lock(&priv->mutex);
9354 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9355 if (enable) {
9356 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9357 #ifdef CONFIG_IEEE80211_RADIOTAP
9358 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9359 #else
9360 priv->net_dev->type = ARPHRD_IEEE80211;
9361 #endif
9362 queue_work(priv->workqueue, &priv->adapter_restart);
9365 ipw_set_channel(priv, parms[1]);
9366 } else {
9367 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9368 mutex_unlock(&priv->mutex);
9369 return 0;
9371 priv->net_dev->type = ARPHRD_ETHER;
9372 queue_work(priv->workqueue, &priv->adapter_restart);
9374 mutex_unlock(&priv->mutex);
9375 return 0;
9378 #endif // CONFIG_IPW2200_MONITOR
9380 static int ipw_wx_reset(struct net_device *dev,
9381 struct iw_request_info *info,
9382 union iwreq_data *wrqu, char *extra)
9384 struct ipw_priv *priv = ieee80211_priv(dev);
9385 IPW_DEBUG_WX("RESET\n");
9386 queue_work(priv->workqueue, &priv->adapter_restart);
9387 return 0;
9390 static int ipw_wx_sw_reset(struct net_device *dev,
9391 struct iw_request_info *info,
9392 union iwreq_data *wrqu, char *extra)
9394 struct ipw_priv *priv = ieee80211_priv(dev);
9395 union iwreq_data wrqu_sec = {
9396 .encoding = {
9397 .flags = IW_ENCODE_DISABLED,
9400 int ret;
9402 IPW_DEBUG_WX("SW_RESET\n");
9404 mutex_lock(&priv->mutex);
9406 ret = ipw_sw_reset(priv, 2);
9407 if (!ret) {
9408 free_firmware();
9409 ipw_adapter_restart(priv);
9412 /* The SW reset bit might have been toggled on by the 'disable'
9413 * module parameter, so take appropriate action */
9414 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9416 mutex_unlock(&priv->mutex);
9417 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9418 mutex_lock(&priv->mutex);
9420 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9421 /* Configuration likely changed -- force [re]association */
9422 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9423 "reset.\n");
9424 if (!ipw_disassociate(priv))
9425 ipw_associate(priv);
9428 mutex_unlock(&priv->mutex);
9430 return 0;
9433 /* Rebase the WE IOCTLs to zero for the handler array */
9434 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
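/*
 * Illustrative expansion (assuming the usual Wireless Extension
 * numbering, where SIOCSIWCOMMIT is the first standard ioctl): the
 * macro turns a WE ioctl number into a zero-based slot in
 * ipw_wx_handlers[], e.g.
 *
 *	IW_IOCTL(SIOCGIWNAME)  ==>  [(SIOCGIWNAME) - SIOCSIWCOMMIT]  ==>  [1]
 *
 * which is why the designated initializers below may appear in any order.
 */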
9435 static iw_handler ipw_wx_handlers[] = {
9436 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9437 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9438 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9439 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9440 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9441 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9442 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9443 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9444 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9445 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9446 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9447 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9448 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9449 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9450 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9451 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9452 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9453 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9454 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9455 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9456 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9457 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9458 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9459 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9460 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9461 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9462 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9463 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9464 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9465 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9466 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9467 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9468 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9469 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9470 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9471 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9472 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9473 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9474 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9475 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9476 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9479 enum {
9480 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9481 IPW_PRIV_GET_POWER,
9482 IPW_PRIV_SET_MODE,
9483 IPW_PRIV_GET_MODE,
9484 IPW_PRIV_SET_PREAMBLE,
9485 IPW_PRIV_GET_PREAMBLE,
9486 IPW_PRIV_RESET,
9487 IPW_PRIV_SW_RESET,
9488 #ifdef CONFIG_IPW2200_MONITOR
9489 IPW_PRIV_SET_MONITOR,
9490 #endif
9493 static struct iw_priv_args ipw_priv_args[] = {
9495 .cmd = IPW_PRIV_SET_POWER,
9496 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9497 .name = "set_power"},
9499 .cmd = IPW_PRIV_GET_POWER,
9500 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9501 .name = "get_power"},
9503 .cmd = IPW_PRIV_SET_MODE,
9504 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9505 .name = "set_mode"},
9507 .cmd = IPW_PRIV_GET_MODE,
9508 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9509 .name = "get_mode"},
9511 .cmd = IPW_PRIV_SET_PREAMBLE,
9512 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9513 .name = "set_preamble"},
9515 .cmd = IPW_PRIV_GET_PREAMBLE,
9516 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9517 .name = "get_preamble"},
9519 IPW_PRIV_RESET,
9520 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9522 IPW_PRIV_SW_RESET,
9523 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9524 #ifdef CONFIG_IPW2200_MONITOR
9526 IPW_PRIV_SET_MONITOR,
9527 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9528 #endif /* CONFIG_IPW2200_MONITOR */
9531 static iw_handler ipw_priv_handler[] = {
9532 ipw_wx_set_powermode,
9533 ipw_wx_get_powermode,
9534 ipw_wx_set_wireless_mode,
9535 ipw_wx_get_wireless_mode,
9536 ipw_wx_set_preamble,
9537 ipw_wx_get_preamble,
9538 ipw_wx_reset,
9539 ipw_wx_sw_reset,
9540 #ifdef CONFIG_IPW2200_MONITOR
9541 ipw_wx_set_monitor,
9542 #endif
9543 };
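/*
 * Illustrative dispatch note (no extra driver code): a private call such
 * as "iwpriv eth1 set_power 3" ('eth1' hypothetical) is matched against
 * ipw_priv_args[] by the WE core and routed to
 * ipw_priv_handler[IPW_PRIV_SET_POWER - SIOCIWFIRSTPRIV], i.e.
 * ipw_wx_set_powermode() above, with the integer argument arriving as
 * *(int *)extra.
 */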
9545 static struct iw_handler_def ipw_wx_handler_def = {
9546 .standard = ipw_wx_handlers,
9547 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9548 .num_private = ARRAY_SIZE(ipw_priv_handler),
9549 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9550 .private = ipw_priv_handler,
9551 .private_args = ipw_priv_args,
9552 .get_wireless_stats = ipw_get_wireless_stats,
9555 /*
9556 * Get wireless statistics.
9557 * Called by /proc/net/wireless
9558 * Also called by SIOCGIWSTATS
9559 */
9560 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9562 struct ipw_priv *priv = ieee80211_priv(dev);
9563 struct iw_statistics *wstats;
9565 wstats = &priv->wstats;
9567 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9568 * netdev->get_wireless_stats seems to be called before fw is
9569 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9570 * and associated; if not associated, the values are all meaningless
9571 * anyway, so set them all to NULL and INVALID */
9572 if (!(priv->status & STATUS_ASSOCIATED)) {
9573 wstats->miss.beacon = 0;
9574 wstats->discard.retries = 0;
9575 wstats->qual.qual = 0;
9576 wstats->qual.level = 0;
9577 wstats->qual.noise = 0;
9578 wstats->qual.updated = 7;
9579 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9580 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9581 return wstats;
9584 wstats->qual.qual = priv->quality;
9585 wstats->qual.level = average_value(&priv->average_rssi);
9586 wstats->qual.noise = average_value(&priv->average_noise);
9587 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9588 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9590 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9591 wstats->discard.retries = priv->last_tx_failures;
9592 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9594 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9595 goto fail_get_ordinal;
9596 wstats->discard.retries += tx_retry; */
9598 return wstats;
9601 /* net device stuff */
9603 static void init_sys_config(struct ipw_sys_config *sys_config)
9605 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9606 sys_config->bt_coexistence = 0;
9607 sys_config->answer_broadcast_ssid_probe = 0;
9608 sys_config->accept_all_data_frames = 0;
9609 sys_config->accept_non_directed_frames = 1;
9610 sys_config->exclude_unicast_unencrypted = 0;
9611 sys_config->disable_unicast_decryption = 1;
9612 sys_config->exclude_multicast_unencrypted = 0;
9613 sys_config->disable_multicast_decryption = 1;
9614 sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV;
9615 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9616 sys_config->dot11g_auto_detection = 0;
9617 sys_config->enable_cts_to_self = 0;
9618 sys_config->bt_coexist_collision_thr = 0;
9619 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256
9620 sys_config->silence_threshold = 0x1e;
9623 static int ipw_net_open(struct net_device *dev)
9625 struct ipw_priv *priv = ieee80211_priv(dev);
9626 IPW_DEBUG_INFO("dev->open\n");
9627 /* we should be verifying the device is ready to be opened */
9628 mutex_lock(&priv->mutex);
9629 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9630 (priv->status & STATUS_ASSOCIATED))
9631 netif_start_queue(dev);
9632 mutex_unlock(&priv->mutex);
9633 return 0;
9636 static int ipw_net_stop(struct net_device *dev)
9638 IPW_DEBUG_INFO("dev->close\n");
9639 netif_stop_queue(dev);
9640 return 0;
9643 /*
9644 todo:
9646 modify to send one tfd per fragment instead of using chunking. otherwise
9647 we need to heavily modify the ieee80211_skb_to_txb.
9649 */
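/*
 * Sketch of the chunking scheme the todo above refers to (illustrative
 * pseudocode, not extra driver logic): ipw_tx_skb() below maps at most
 * NUM_TFD_CHUNKS - 2 fragments one-to-one onto TFD chunks and, when the
 * txb holds more fragments than that, copies the remainder into one
 * freshly allocated skb that becomes the final chunk:
 *
 *	chunks = min(NUM_TFD_CHUNKS - 2, txb->nr_frags);
 *	for (i = 0; i < chunks; i++)
 *		map fragment i as chunk i;
 *	if (chunks < txb->nr_frags)
 *		coalesce fragments chunks .. nr_frags - 1 into one extra chunk;
 */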
9650 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9651 int pri)
9653 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9654 txb->fragments[0]->data;
9655 int i = 0;
9656 struct tfd_frame *tfd;
9657 #ifdef CONFIG_IPW_QOS
9658 int tx_id = ipw_get_tx_queue_number(priv, pri);
9659 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9660 #else
9661 struct clx2_tx_queue *txq = &priv->txq[0];
9662 #endif
9663 struct clx2_queue *q = &txq->q;
9664 u8 id, hdr_len, unicast;
9665 u16 remaining_bytes;
9666 int fc;
9668 switch (priv->ieee->iw_mode) {
9669 case IW_MODE_ADHOC:
9670 hdr_len = IEEE80211_3ADDR_LEN;
9671 unicast = !is_multicast_ether_addr(hdr->addr1);
9672 id = ipw_find_station(priv, hdr->addr1);
9673 if (id == IPW_INVALID_STATION) {
9674 id = ipw_add_station(priv, hdr->addr1);
9675 if (id == IPW_INVALID_STATION) {
9676 IPW_WARNING("Attempt to send data to "
9677 "invalid cell: " MAC_FMT "\n",
9678 MAC_ARG(hdr->addr1));
9679 goto drop;
9682 break;
9684 case IW_MODE_INFRA:
9685 default:
9686 unicast = !is_multicast_ether_addr(hdr->addr3);
9687 hdr_len = IEEE80211_3ADDR_LEN;
9688 id = 0;
9689 break;
9692 tfd = &txq->bd[q->first_empty];
9693 txq->txb[q->first_empty] = txb;
9694 memset(tfd, 0, sizeof(*tfd));
9695 tfd->u.data.station_number = id;
9697 tfd->control_flags.message_type = TX_FRAME_TYPE;
9698 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9700 tfd->u.data.cmd_id = DINO_CMD_TX;
9701 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9702 remaining_bytes = txb->payload_size;
9704 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9705 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9706 else
9707 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9709 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9710 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9712 fc = le16_to_cpu(hdr->frame_ctl);
9713 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9715 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9717 if (likely(unicast))
9718 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9720 if (txb->encrypted && !priv->ieee->host_encrypt) {
9721 switch (priv->ieee->sec.level) {
9722 case SEC_LEVEL_3:
9723 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9724 IEEE80211_FCTL_PROTECTED;
9725 /* XXX: ACK flag must be set for CCMP even if it
9726 * is a multicast/broadcast packet, because CCMP
9727 * group traffic encrypted with the GTK is
9728 * actually handled by the AP. */
9729 if (!unicast)
9730 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9732 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9733 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9734 tfd->u.data.key_index = 0;
9735 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9736 break;
9737 case SEC_LEVEL_2:
9738 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9739 IEEE80211_FCTL_PROTECTED;
9740 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9741 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9742 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9743 break;
9744 case SEC_LEVEL_1:
9745 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9746 IEEE80211_FCTL_PROTECTED;
9747 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9748 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9750 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9751 else
9752 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9753 break;
9754 case SEC_LEVEL_0:
9755 break;
9756 default:
9757 printk(KERN_ERR "Unknown security level %d\n",
9758 priv->ieee->sec.level);
9759 break;
9761 } else
9762 /* No hardware encryption */
9763 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9765 #ifdef CONFIG_IPW_QOS
9766 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9767 #endif /* CONFIG_IPW_QOS */
9769 /* payload */
9770 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9771 txb->nr_frags));
9772 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9773 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9774 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9775 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9776 i, le32_to_cpu(tfd->u.data.num_chunks),
9777 txb->fragments[i]->len - hdr_len);
9778 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9779 i, tfd->u.data.num_chunks,
9780 txb->fragments[i]->len - hdr_len);
9781 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9782 txb->fragments[i]->len - hdr_len);
9784 tfd->u.data.chunk_ptr[i] =
9785 cpu_to_le32(pci_map_single
9786 (priv->pci_dev,
9787 txb->fragments[i]->data + hdr_len,
9788 txb->fragments[i]->len - hdr_len,
9789 PCI_DMA_TODEVICE));
9790 tfd->u.data.chunk_len[i] =
9791 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9794 if (i != txb->nr_frags) {
9795 struct sk_buff *skb;
9796 u16 remaining_bytes = 0;
9797 int j;
9799 for (j = i; j < txb->nr_frags; j++)
9800 remaining_bytes += txb->fragments[j]->len - hdr_len;
9802 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9803 remaining_bytes);
9804 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9805 if (skb != NULL) {
9806 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9807 for (j = i; j < txb->nr_frags; j++) {
9808 int size = txb->fragments[j]->len - hdr_len;
9810 printk(KERN_INFO "Adding frag %d %d...\n",
9811 j, size);
9812 memcpy(skb_put(skb, size),
9813 txb->fragments[j]->data + hdr_len, size);
9815 dev_kfree_skb_any(txb->fragments[i]);
9816 txb->fragments[i] = skb;
9817 tfd->u.data.chunk_ptr[i] =
9818 cpu_to_le32(pci_map_single
9819 (priv->pci_dev, skb->data,
9820 tfd->u.data.chunk_len[i],
9821 PCI_DMA_TODEVICE));
9823 tfd->u.data.num_chunks =
9824 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9829 /* kick DMA */
9830 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9831 ipw_write32(priv, q->reg_w, q->first_empty);
9833 if (ipw_queue_space(q) < q->high_mark)
9834 netif_stop_queue(priv->net_dev);
9836 return NETDEV_TX_OK;
9838 drop:
9839 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9840 ieee80211_txb_free(txb);
9841 return NETDEV_TX_OK;
9844 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9846 struct ipw_priv *priv = ieee80211_priv(dev);
9847 #ifdef CONFIG_IPW_QOS
9848 int tx_id = ipw_get_tx_queue_number(priv, pri);
9849 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9850 #else
9851 struct clx2_tx_queue *txq = &priv->txq[0];
9852 #endif /* CONFIG_IPW_QOS */
9854 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9855 return 1;
9857 return 0;
9858 }
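/* ieee80211 hard_start_xmit hook.  Runs under priv->lock with IRQs
 * disabled; frames submitted while not associated are counted as carrier
 * errors, the queue is stopped and 1 (busy) is returned.  On a successful
 * ipw_tx_skb() the activity LED is turned on before the lock is dropped. */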
9860 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9861 struct net_device *dev, int pri)
9863 struct ipw_priv *priv = ieee80211_priv(dev);
9864 unsigned long flags;
9865 int ret;
9867 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9868 spin_lock_irqsave(&priv->lock, flags);
9870 if (!(priv->status & STATUS_ASSOCIATED)) {
9871 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9872 priv->ieee->stats.tx_carrier_errors++;
9873 netif_stop_queue(dev);
9874 goto fail_unlock;
9877 ret = ipw_tx_skb(priv, txb, pri);
9878 if (ret == NETDEV_TX_OK)
9879 __ipw_led_activity_on(priv);
9880 spin_unlock_irqrestore(&priv->lock, flags);
9882 return ret;
9884 fail_unlock:
9885 spin_unlock_irqrestore(&priv->lock, flags);
9886 return 1;
9889 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9891 struct ipw_priv *priv = ieee80211_priv(dev);
9893 priv->ieee->stats.tx_packets = priv->tx_packets;
9894 priv->ieee->stats.rx_packets = priv->rx_packets;
9895 return &priv->ieee->stats;
9898 static void ipw_net_set_multicast_list(struct net_device *dev)
9903 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9905 struct ipw_priv *priv = ieee80211_priv(dev);
9906 struct sockaddr *addr = p;
9907 if (!is_valid_ether_addr(addr->sa_data))
9908 return -EADDRNOTAVAIL;
9909 mutex_lock(&priv->mutex);
9910 priv->config |= CFG_CUSTOM_MAC;
9911 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9912 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9913 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9914 queue_work(priv->workqueue, &priv->adapter_restart);
9915 mutex_unlock(&priv->mutex);
9916 return 0;
9917 }
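/* ethtool support.  The firmware version and build date reported by
 * "ethtool -i" are read from the firmware ordinal table
 * (IPW_ORD_STAT_FW_VERSION / IPW_ORD_STAT_FW_DATE); get_eeprom/set_eeprom
 * expose the cached EEPROM image.  Illustrative usage (the interface name
 * is only an example):
 *
 *   ethtool -i eth1      # driver, version, firmware version
 *   ethtool -e eth1      # dump the EEPROM image
 */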
9919 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9920 struct ethtool_drvinfo *info)
9922 struct ipw_priv *p = ieee80211_priv(dev);
9923 char vers[64];
9924 char date[32];
9925 u32 len;
9927 strcpy(info->driver, DRV_NAME);
9928 strcpy(info->version, DRV_VERSION);
9930 len = sizeof(vers);
9931 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9932 len = sizeof(date);
9933 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9935 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9936 vers, date);
9937 strcpy(info->bus_info, pci_name(p->pci_dev));
9938 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9941 static u32 ipw_ethtool_get_link(struct net_device *dev)
9943 struct ipw_priv *priv = ieee80211_priv(dev);
9944 return (priv->status & STATUS_ASSOCIATED) != 0;
9947 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
9949 return IPW_EEPROM_IMAGE_SIZE;
9952 static int ipw_ethtool_get_eeprom(struct net_device *dev,
9953 struct ethtool_eeprom *eeprom, u8 * bytes)
9955 struct ipw_priv *p = ieee80211_priv(dev);
9957 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9958 return -EINVAL;
9959 mutex_lock(&p->mutex);
9960 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9961 mutex_unlock(&p->mutex);
9962 return 0;
9965 static int ipw_ethtool_set_eeprom(struct net_device *dev,
9966 struct ethtool_eeprom *eeprom, u8 * bytes)
9968 struct ipw_priv *p = ieee80211_priv(dev);
9969 int i;
9971 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9972 return -EINVAL;
9973 mutex_lock(&p->mutex);
9974 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9975 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
9976 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
9977 mutex_unlock(&p->mutex);
9978 return 0;
9981 static struct ethtool_ops ipw_ethtool_ops = {
9982 .get_link = ipw_ethtool_get_link,
9983 .get_drvinfo = ipw_ethtool_get_drvinfo,
9984 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
9985 .get_eeprom = ipw_ethtool_get_eeprom,
9986 .set_eeprom = ipw_ethtool_set_eeprom,
9987 };
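/* Hard interrupt handler.  The IRQ line is shared (registered with
 * SA_SHIRQ in ipw_pci_probe), so the handler returns IRQ_NONE when
 * interrupts are masked, when none of the asserted bits belong to us, or
 * when INTA reads back 0xFFFFFFFF (card gone).  Otherwise it masks further
 * interrupts, acks the asserted bits, caches them in priv->isr_inta and
 * defers the real work to the irq_tasklet. */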
9989 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
9991 struct ipw_priv *priv = data;
9992 u32 inta, inta_mask;
9994 if (!priv)
9995 return IRQ_NONE;
9997 spin_lock(&priv->lock);
9999 if (!(priv->status & STATUS_INT_ENABLED)) {
10000 /* Shared IRQ */
10001 goto none;
10004 inta = ipw_read32(priv, IPW_INTA_RW);
10005 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10007 if (inta == 0xFFFFFFFF) {
10008 /* Hardware disappeared */
10009 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10010 goto none;
10013 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10014 /* Shared interrupt */
10015 goto none;
10018 /* tell the device to stop sending interrupts */
10019 ipw_disable_interrupts(priv);
10021 /* ack current interrupts */
10022 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10023 ipw_write32(priv, IPW_INTA_RW, inta);
10025 /* Cache INTA value for our tasklet */
10026 priv->isr_inta = inta;
10028 tasklet_schedule(&priv->irq_tasklet);
10030 spin_unlock(&priv->lock);
10032 return IRQ_HANDLED;
10033 none:
10034 spin_unlock(&priv->lock);
10035 return IRQ_NONE;
10036 }
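/* Deferred rf-kill poll.  While the hardware kill switch is active this
 * work requeues itself every 2 seconds; once the switch is released (and
 * software rf-kill is not set) an adapter restart is queued, since the
 * restart cannot be done while holding the spinlock. */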
10038 static void ipw_rf_kill(void *adapter)
10040 struct ipw_priv *priv = adapter;
10041 unsigned long flags;
10043 spin_lock_irqsave(&priv->lock, flags);
10045 if (rf_kill_active(priv)) {
10046 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10047 if (priv->workqueue)
10048 queue_delayed_work(priv->workqueue,
10049 &priv->rf_kill, 2 * HZ);
10050 goto exit_unlock;
10053 /* RF Kill is now disabled, so bring the device back up */
10055 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10056 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10057 "device\n");
10059 /* we can not do an adapter restart while inside an irq lock */
10060 queue_work(priv->workqueue, &priv->adapter_restart);
10061 } else
10062 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10063 "enabled\n");
10065 exit_unlock:
10066 spin_unlock_irqrestore(&priv->lock, flags);
10069 static void ipw_bg_rf_kill(void *data)
10071 struct ipw_priv *priv = data;
10072 mutex_lock(&priv->mutex);
10073 ipw_rf_kill(data);
10074 mutex_unlock(&priv->mutex);
10077 static void ipw_link_up(struct ipw_priv *priv)
10079 priv->last_seq_num = -1;
10080 priv->last_frag_num = -1;
10081 priv->last_packet_time = 0;
10083 netif_carrier_on(priv->net_dev);
10084 if (netif_queue_stopped(priv->net_dev)) {
10085 IPW_DEBUG_NOTIF("waking queue\n");
10086 netif_wake_queue(priv->net_dev);
10087 } else {
10088 IPW_DEBUG_NOTIF("starting queue\n");
10089 netif_start_queue(priv->net_dev);
10092 cancel_delayed_work(&priv->request_scan);
10093 ipw_reset_stats(priv);
10094 /* Ensure the rate is updated immediately */
10095 priv->last_rate = ipw_get_current_rate(priv);
10096 ipw_gather_stats(priv);
10097 ipw_led_link_up(priv);
10098 notify_wx_assoc_event(priv);
10100 if (priv->config & CFG_BACKGROUND_SCAN)
10101 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10104 static void ipw_bg_link_up(void *data)
10106 struct ipw_priv *priv = data;
10107 mutex_lock(&priv->mutex);
10108 ipw_link_up(data);
10109 mutex_unlock(&priv->mutex);
10112 static void ipw_link_down(struct ipw_priv *priv)
10114 ipw_led_link_down(priv);
10115 netif_carrier_off(priv->net_dev);
10116 netif_stop_queue(priv->net_dev);
10117 notify_wx_assoc_event(priv);
10119 /* Cancel any queued work ... */
10120 cancel_delayed_work(&priv->request_scan);
10121 cancel_delayed_work(&priv->adhoc_check);
10122 cancel_delayed_work(&priv->gather_stats);
10124 ipw_reset_stats(priv);
10126 if (!(priv->status & STATUS_EXIT_PENDING)) {
10127 /* Queue up another scan... */
10128 queue_work(priv->workqueue, &priv->request_scan);
10132 static void ipw_bg_link_down(void *data)
10134 struct ipw_priv *priv = data;
10135 mutex_lock(&priv->mutex);
10136 ipw_link_down(data);
10137 mutex_unlock(&priv->mutex);
10138 }
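/* Create the driver workqueue and register all of the deferred-work
 * handlers and the IRQ tasklet.  Note the three-argument INIT_WORK() and
 * the void-pointer work handlers: this appears to be the pre-2.6.20
 * workqueue API, where the work function receives the data pointer that
 * was passed to INIT_WORK() directly. */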
10140 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10142 int ret = 0;
10144 priv->workqueue = create_workqueue(DRV_NAME);
10145 init_waitqueue_head(&priv->wait_command_queue);
10146 init_waitqueue_head(&priv->wait_state);
10148 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10149 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10150 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10151 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10152 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10153 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10154 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10155 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10156 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10157 INIT_WORK(&priv->request_scan,
10158 (void (*)(void *))ipw_request_scan, priv);
10159 INIT_WORK(&priv->gather_stats,
10160 (void (*)(void *))ipw_bg_gather_stats, priv);
10161 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10162 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10163 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10164 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10165 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10166 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10167 priv);
10168 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10169 priv);
10170 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10171 priv);
10172 INIT_WORK(&priv->merge_networks,
10173 (void (*)(void *))ipw_merge_adhoc_network, priv);
10175 #ifdef CONFIG_IPW_QOS
10176 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10177 priv);
10178 #endif /* CONFIG_IPW_QOS */
10180 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10181 ipw_irq_tasklet, (unsigned long)priv);
10183 return ret;
10184 }
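/* ieee80211 set_security callback.  Copies each key slot whose bit is set
 * in sec->flags, then mirrors the active key, auth mode, enable/encrypt
 * state and security level into priv->ieee->sec, updating CAP_SHARED_KEY /
 * CAP_PRIVACY_ON and flagging STATUS_SECURITY_UPDATED.  When the stack is
 * not host-encrypting, the keys are also pushed to the hardware. */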
10186 static void shim__set_security(struct net_device *dev,
10187 struct ieee80211_security *sec)
10189 struct ipw_priv *priv = ieee80211_priv(dev);
10190 int i;
10191 for (i = 0; i < 4; i++) {
10192 if (sec->flags & (1 << i)) {
10193 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10194 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10195 if (sec->key_sizes[i] == 0)
10196 priv->ieee->sec.flags &= ~(1 << i);
10197 else {
10198 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10199 sec->key_sizes[i]);
10200 priv->ieee->sec.flags |= (1 << i);
10201 }
10202 priv->status |= STATUS_SECURITY_UPDATED;
10203 } else if (sec->level != SEC_LEVEL_1)
10204 priv->ieee->sec.flags &= ~(1 << i);
10205 }
10207 if (sec->flags & SEC_ACTIVE_KEY) {
10208 if (sec->active_key <= 3) {
10209 priv->ieee->sec.active_key = sec->active_key;
10210 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10211 } else
10212 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10213 priv->status |= STATUS_SECURITY_UPDATED;
10214 } else
10215 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10217 if ((sec->flags & SEC_AUTH_MODE) &&
10218 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10219 priv->ieee->sec.auth_mode = sec->auth_mode;
10220 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10221 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10222 priv->capability |= CAP_SHARED_KEY;
10223 else
10224 priv->capability &= ~CAP_SHARED_KEY;
10225 priv->status |= STATUS_SECURITY_UPDATED;
10228 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10229 priv->ieee->sec.flags |= SEC_ENABLED;
10230 priv->ieee->sec.enabled = sec->enabled;
10231 priv->status |= STATUS_SECURITY_UPDATED;
10232 if (sec->enabled)
10233 priv->capability |= CAP_PRIVACY_ON;
10234 else
10235 priv->capability &= ~CAP_PRIVACY_ON;
10238 if (sec->flags & SEC_ENCRYPT)
10239 priv->ieee->sec.encrypt = sec->encrypt;
10241 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10242 priv->ieee->sec.level = sec->level;
10243 priv->ieee->sec.flags |= SEC_LEVEL;
10244 priv->status |= STATUS_SECURITY_UPDATED;
10247 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10248 ipw_set_hwcrypto_keys(priv);
10250 /* To match the current functionality of ipw2100 (which works well with
10251 * various supplicants), we don't force a disassociate if the
10252 * privacy capability changes ... */
10253 #if 0
10254 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10255 (((priv->assoc_request.capability &
10256 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10257 (!(priv->assoc_request.capability &
10258 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10259 IPW_DEBUG_ASSOC("Disassociating due to capability "
10260 "change.\n");
10261 ipw_disassociate(priv);
10263 #endif
10264 }
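/* Build the rate set advertised to the firmware: OFDM rates only for the
 * 5.2GHz band (A mode), otherwise G mode with the CCK rates plus the OFDM
 * rates when OFDM modulation is enabled. */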
10266 static int init_supported_rates(struct ipw_priv *priv,
10267 struct ipw_supported_rates *rates)
10269 /* TODO: Mask out rates based on priv->rates_mask */
10271 memset(rates, 0, sizeof(*rates));
10272 /* configure supported rates */
10273 switch (priv->ieee->freq_band) {
10274 case IEEE80211_52GHZ_BAND:
10275 rates->ieee_mode = IPW_A_MODE;
10276 rates->purpose = IPW_RATE_CAPABILITIES;
10277 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10278 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10279 break;
10281 default: /* Mixed or 2.4Ghz */
10282 rates->ieee_mode = IPW_G_MODE;
10283 rates->purpose = IPW_RATE_CAPABILITIES;
10284 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10285 IEEE80211_CCK_DEFAULT_RATES_MASK);
10286 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10287 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10288 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10290 break;
10293 return 0;
10294 }
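/* One-shot device configuration, run after each firmware (re)load: set the
 * Tx power and adapter MAC, build the system config (including the optional
 * Bluetooth coexistence bits taken from the EEPROM SKU capabilities), send
 * the supported rates, the RTS threshold (when set) and the random seed,
 * then issue host-complete to move the firmware into the RUN state.  Static
 * WEP keys are preloaded when hardware crypto is in use. */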
10296 static int ipw_config(struct ipw_priv *priv)
10298 /* This is only called from ipw_up, which resets/reloads the firmware,
10299 so we don't need to first disable the card before we configure
10300 it */
10301 if (ipw_set_tx_power(priv))
10302 goto error;
10304 /* initialize adapter address */
10305 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10306 goto error;
10308 /* set basic system config settings */
10309 init_sys_config(&priv->sys_config);
10311 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10312 * Does not support BT priority yet (don't abort or defer our Tx) */
10313 if (bt_coexist) {
10314 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10316 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10317 priv->sys_config.bt_coexistence
10318 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10319 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10320 priv->sys_config.bt_coexistence
10321 |= CFG_BT_COEXISTENCE_OOB;
10324 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10325 priv->sys_config.answer_broadcast_ssid_probe = 1;
10326 else
10327 priv->sys_config.answer_broadcast_ssid_probe = 0;
10329 if (ipw_send_system_config(priv, &priv->sys_config))
10330 goto error;
10332 init_supported_rates(priv, &priv->rates);
10333 if (ipw_send_supported_rates(priv, &priv->rates))
10334 goto error;
10336 /* Set request-to-send threshold */
10337 if (priv->rts_threshold) {
10338 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10339 goto error;
10341 #ifdef CONFIG_IPW_QOS
10342 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10343 ipw_qos_activate(priv, NULL);
10344 #endif /* CONFIG_IPW_QOS */
10346 if (ipw_set_random_seed(priv))
10347 goto error;
10349 /* final state transition to the RUN state */
10350 if (ipw_send_host_complete(priv))
10351 goto error;
10353 priv->status |= STATUS_INIT;
10355 ipw_led_init(priv);
10356 ipw_led_radio_on(priv);
10357 priv->notif_missed_beacons = 0;
10359 /* Set hardware WEP key if it is configured. */
10360 if ((priv->capability & CAP_PRIVACY_ON) &&
10361 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10362 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10363 ipw_set_hwcrypto_keys(priv);
10365 return 0;
10367 error:
10368 return -EIO;
10369 }
10371 /*
10372 * NOTE:
10374 * These tables have been tested in conjunction with the
10375 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10377 * Altering these values, using them on other hardware, or using them in
10378 * geographies not intended for resale of the above mentioned Intel
10379 * adapters has not been tested.
10381 * Remember to update the table in README.ipw2200 when changing this
10382 * table.
10383 */
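/* Each entry below is keyed by the three-character SKU / country code
 * stored in the EEPROM; ipw_up() falls back to the first ("---") entry
 * when the code is not recognized.  Channel entries are
 * { frequency in MHz, channel number, optional flags }, where the flags
 * mark passive-scan-only or 802.11b-only channels. */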
10385 static const struct ieee80211_geo ipw_geos[] = {
10386 { /* Restricted */
10387 "---",
10388 .bg_channels = 11,
10389 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10390 {2427, 4}, {2432, 5}, {2437, 6},
10391 {2442, 7}, {2447, 8}, {2452, 9},
10392 {2457, 10}, {2462, 11}},
10395 { /* Custom US/Canada */
10396 "ZZF",
10397 .bg_channels = 11,
10398 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10399 {2427, 4}, {2432, 5}, {2437, 6},
10400 {2442, 7}, {2447, 8}, {2452, 9},
10401 {2457, 10}, {2462, 11}},
10402 .a_channels = 8,
10403 .a = {{5180, 36},
10404 {5200, 40},
10405 {5220, 44},
10406 {5240, 48},
10407 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10408 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10409 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10410 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10413 { /* Rest of World */
10414 "ZZD",
10415 .bg_channels = 13,
10416 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10417 {2427, 4}, {2432, 5}, {2437, 6},
10418 {2442, 7}, {2447, 8}, {2452, 9},
10419 {2457, 10}, {2462, 11}, {2467, 12},
10420 {2472, 13}},
10423 { /* Custom USA & Europe & High */
10424 "ZZA",
10425 .bg_channels = 11,
10426 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10427 {2427, 4}, {2432, 5}, {2437, 6},
10428 {2442, 7}, {2447, 8}, {2452, 9},
10429 {2457, 10}, {2462, 11}},
10430 .a_channels = 13,
10431 .a = {{5180, 36},
10432 {5200, 40},
10433 {5220, 44},
10434 {5240, 48},
10435 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10436 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10437 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10438 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10439 {5745, 149},
10440 {5765, 153},
10441 {5785, 157},
10442 {5805, 161},
10443 {5825, 165}},
10446 { /* Custom NA & Europe */
10447 "ZZB",
10448 .bg_channels = 11,
10449 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10450 {2427, 4}, {2432, 5}, {2437, 6},
10451 {2442, 7}, {2447, 8}, {2452, 9},
10452 {2457, 10}, {2462, 11}},
10453 .a_channels = 13,
10454 .a = {{5180, 36},
10455 {5200, 40},
10456 {5220, 44},
10457 {5240, 48},
10458 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10459 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10460 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10461 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10462 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10463 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10464 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10465 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10466 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10469 { /* Custom Japan */
10470 "ZZC",
10471 .bg_channels = 11,
10472 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10473 {2427, 4}, {2432, 5}, {2437, 6},
10474 {2442, 7}, {2447, 8}, {2452, 9},
10475 {2457, 10}, {2462, 11}},
10476 .a_channels = 4,
10477 .a = {{5170, 34}, {5190, 38},
10478 {5210, 42}, {5230, 46}},
10481 { /* Custom */
10482 "ZZM",
10483 .bg_channels = 11,
10484 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10485 {2427, 4}, {2432, 5}, {2437, 6},
10486 {2442, 7}, {2447, 8}, {2452, 9},
10487 {2457, 10}, {2462, 11}},
10490 { /* Europe */
10491 "ZZE",
10492 .bg_channels = 13,
10493 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10494 {2427, 4}, {2432, 5}, {2437, 6},
10495 {2442, 7}, {2447, 8}, {2452, 9},
10496 {2457, 10}, {2462, 11}, {2467, 12},
10497 {2472, 13}},
10498 .a_channels = 19,
10499 .a = {{5180, 36},
10500 {5200, 40},
10501 {5220, 44},
10502 {5240, 48},
10503 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10504 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10505 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10506 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10507 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10508 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10509 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10510 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10511 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10512 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10513 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10514 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10515 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10516 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10517 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10520 { /* Custom Japan */
10521 "ZZJ",
10522 .bg_channels = 14,
10523 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10524 {2427, 4}, {2432, 5}, {2437, 6},
10525 {2442, 7}, {2447, 8}, {2452, 9},
10526 {2457, 10}, {2462, 11}, {2467, 12},
10527 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10528 .a_channels = 4,
10529 .a = {{5170, 34}, {5190, 38},
10530 {5210, 42}, {5230, 46}},
10533 { /* Rest of World */
10534 "ZZR",
10535 .bg_channels = 14,
10536 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10537 {2427, 4}, {2432, 5}, {2437, 6},
10538 {2442, 7}, {2447, 8}, {2452, 9},
10539 {2457, 10}, {2462, 11}, {2467, 12},
10540 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10541 IEEE80211_CH_PASSIVE_ONLY}},
10544 { /* High Band */
10545 "ZZH",
10546 .bg_channels = 13,
10547 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10548 {2427, 4}, {2432, 5}, {2437, 6},
10549 {2442, 7}, {2447, 8}, {2452, 9},
10550 {2457, 10}, {2462, 11},
10551 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10552 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10553 .a_channels = 4,
10554 .a = {{5745, 149}, {5765, 153},
10555 {5785, 157}, {5805, 161}},
10558 { /* Custom Europe */
10559 "ZZG",
10560 .bg_channels = 13,
10561 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10562 {2427, 4}, {2432, 5}, {2437, 6},
10563 {2442, 7}, {2447, 8}, {2452, 9},
10564 {2457, 10}, {2462, 11},
10565 {2467, 12}, {2472, 13}},
10566 .a_channels = 4,
10567 .a = {{5180, 36}, {5200, 40},
10568 {5220, 44}, {5240, 48}},
10571 { /* Europe */
10572 "ZZK",
10573 .bg_channels = 13,
10574 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10575 {2427, 4}, {2432, 5}, {2437, 6},
10576 {2442, 7}, {2447, 8}, {2452, 9},
10577 {2457, 10}, {2462, 11},
10578 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10579 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10580 .a_channels = 24,
10581 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10582 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10583 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10584 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10585 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10586 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10587 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10588 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10589 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10590 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10591 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10592 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10593 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10594 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10595 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10596 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10597 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10598 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10599 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10600 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10601 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10602 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10603 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10604 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10607 { /* Europe */
10608 "ZZL",
10609 .bg_channels = 11,
10610 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10611 {2427, 4}, {2432, 5}, {2437, 6},
10612 {2442, 7}, {2447, 8}, {2452, 9},
10613 {2457, 10}, {2462, 11}},
10614 .a_channels = 13,
10615 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10616 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10617 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10618 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10619 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10620 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10621 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10622 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10623 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10624 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10625 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10626 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10627 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10631 #define MAX_HW_RESTARTS 5
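/* Bring the device up.  The firmware is loaded and configured with up to
 * MAX_HW_RESTARTS attempts; along the way the MAC address is taken from
 * the EEPROM (unless overridden), the geography is selected by matching
 * the EEPROM country code against ipw_geos[], and rf-kill (software or
 * hardware) aborts the bring-up, with the hardware switch re-polled every
 * two seconds. */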
10632 static int ipw_up(struct ipw_priv *priv)
10634 int rc, i, j;
10636 if (priv->status & STATUS_EXIT_PENDING)
10637 return -EIO;
10639 if (cmdlog && !priv->cmdlog) {
10640 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10641 GFP_KERNEL);
10642 if (priv->cmdlog == NULL) {
10643 IPW_ERROR("Error allocating %d command log entries.\n",
10644 cmdlog);
10645 } else {
10646 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10647 priv->cmdlog_len = cmdlog;
10651 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10652 /* Load the microcode, firmware, and eeprom.
10653 * Also start the clocks. */
10654 rc = ipw_load(priv);
10655 if (rc) {
10656 IPW_ERROR("Unable to load firmware: %d\n", rc);
10657 return rc;
10660 ipw_init_ordinals(priv);
10661 if (!(priv->config & CFG_CUSTOM_MAC))
10662 eeprom_parse_mac(priv, priv->mac_addr);
10663 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10665 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10666 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10667 ipw_geos[j].name, 3))
10668 break;
10670 if (j == ARRAY_SIZE(ipw_geos)) {
10671 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10672 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10673 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10674 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10675 j = 0;
10677 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
10678 IPW_WARNING("Could not set geography.");
10679 return 0;
10682 if (priv->status & STATUS_RF_KILL_SW) {
10683 IPW_WARNING("Radio disabled by module parameter.\n");
10684 return 0;
10685 } else if (rf_kill_active(priv)) {
10686 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10687 "Kill switch must be turned off for "
10688 "wireless networking to work.\n");
10689 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10690 2 * HZ);
10691 return 0;
10694 rc = ipw_config(priv);
10695 if (!rc) {
10696 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10698 /* If configured to try and auto-associate, kick
10699 * off a scan. */
10700 queue_work(priv->workqueue, &priv->request_scan);
10702 return 0;
10705 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10706 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10707 i, MAX_HW_RESTARTS);
10709 /* We had an error bringing up the hardware, so take it
10710 * all the way back down so we can try again */
10711 ipw_down(priv);
10714 /* tried to restart and config the device for as long as our
10715 * patience could withstand */
10716 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10718 return -EIO;
10721 static void ipw_bg_up(void *data)
10723 struct ipw_priv *priv = data;
10724 mutex_lock(&priv->mutex);
10725 ipw_up(data);
10726 mutex_unlock(&priv->mutex);
10729 static void ipw_deinit(struct ipw_priv *priv)
10731 int i;
10733 if (priv->status & STATUS_SCANNING) {
10734 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10735 ipw_abort_scan(priv);
10738 if (priv->status & STATUS_ASSOCIATED) {
10739 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10740 ipw_disassociate(priv);
10743 ipw_led_shutdown(priv);
10745 /* Wait up to 1s for status to change to not scanning and not
10746 * associated (disassociation can take a while for a full 802.11
10747 * exchange) */
10748 for (i = 1000; i && (priv->status &
10749 (STATUS_DISASSOCIATING |
10750 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10751 udelay(10);
10753 if (priv->status & (STATUS_DISASSOCIATING |
10754 STATUS_ASSOCIATED | STATUS_SCANNING))
10755 IPW_DEBUG_INFO("Still associated or scanning...\n");
10756 else
10757 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10759 /* Attempt to disable the card */
10760 ipw_send_card_disable(priv, 0);
10762 priv->status &= ~STATUS_INIT;
10765 static void ipw_down(struct ipw_priv *priv)
10767 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10769 priv->status |= STATUS_EXIT_PENDING;
10771 if (ipw_is_init(priv))
10772 ipw_deinit(priv);
10774 /* Wipe out the EXIT_PENDING status bit if we are not actually
10775 * exiting the module */
10776 if (!exit_pending)
10777 priv->status &= ~STATUS_EXIT_PENDING;
10779 /* tell the device to stop sending interrupts */
10780 ipw_disable_interrupts(priv);
10782 /* Clear all bits but the RF Kill */
10783 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10784 netif_carrier_off(priv->net_dev);
10785 netif_stop_queue(priv->net_dev);
10787 ipw_stop_nic(priv);
10789 ipw_led_radio_off(priv);
10792 static void ipw_bg_down(void *data)
10794 struct ipw_priv *priv = data;
10795 mutex_lock(&priv->mutex);
10796 ipw_down(data);
10797 mutex_unlock(&priv->mutex);
10800 /* Called by register_netdev() */
10801 static int ipw_net_init(struct net_device *dev)
10803 struct ipw_priv *priv = ieee80211_priv(dev);
10804 mutex_lock(&priv->mutex);
10806 if (ipw_up(priv)) {
10807 mutex_unlock(&priv->mutex);
10808 return -EIO;
10811 mutex_unlock(&priv->mutex);
10812 return 0;
10815 /* PCI driver stuff */
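/* PCI match table: { vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data }.  The first block matches specific Intel
 * subsystem IDs, followed by PCI_ANY_ID catch-alls, including the BG
 * (0x4220/0x4221) and ABG (0x4223/0x4224) parts noted in the comments. */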
10816 static struct pci_device_id card_ids[] = {
10817 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10818 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10819 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10820 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10821 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10822 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10823 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10824 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10825 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10826 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10827 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10828 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10829 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10830 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10831 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10832 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10833 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10834 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10835 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10836 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10837 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10838 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10840 /* required last entry */
10841 {0,}
10844 MODULE_DEVICE_TABLE(pci, card_ids);
10846 static struct attribute *ipw_sysfs_entries[] = {
10847 &dev_attr_rf_kill.attr,
10848 &dev_attr_direct_dword.attr,
10849 &dev_attr_indirect_byte.attr,
10850 &dev_attr_indirect_dword.attr,
10851 &dev_attr_mem_gpio_reg.attr,
10852 &dev_attr_command_event_reg.attr,
10853 &dev_attr_nic_type.attr,
10854 &dev_attr_status.attr,
10855 &dev_attr_cfg.attr,
10856 &dev_attr_error.attr,
10857 &dev_attr_event_log.attr,
10858 &dev_attr_cmd_log.attr,
10859 &dev_attr_eeprom_delay.attr,
10860 &dev_attr_ucode_version.attr,
10861 &dev_attr_rtc.attr,
10862 &dev_attr_scan_age.attr,
10863 &dev_attr_led.attr,
10864 &dev_attr_speed_scan.attr,
10865 &dev_attr_net_stats.attr,
10866 NULL
10869 static struct attribute_group ipw_attribute_group = {
10870 .name = NULL, /* put in device directory */
10871 .attrs = ipw_sysfs_entries,
10872 };
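/* PCI probe: allocate the ieee80211 device, enable the PCI device with a
 * 32-bit DMA mask, apply the RETRY_TIMEOUT workaround, map BAR 0, set up
 * the deferred work and the (shared) IRQ, wire the net_device and
 * ieee80211 callbacks, create the sysfs attribute group and finally
 * register the network device.  Each failure path unwinds through the
 * labels at the bottom. */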
10874 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10876 int err = 0;
10877 struct net_device *net_dev;
10878 void __iomem *base;
10879 u32 length, val;
10880 struct ipw_priv *priv;
10881 int i;
10883 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
10884 if (net_dev == NULL) {
10885 err = -ENOMEM;
10886 goto out;
10889 priv = ieee80211_priv(net_dev);
10890 priv->ieee = netdev_priv(net_dev);
10892 priv->net_dev = net_dev;
10893 priv->pci_dev = pdev;
10894 #ifdef CONFIG_IPW2200_DEBUG
10895 ipw_debug_level = debug;
10896 #endif
10897 spin_lock_init(&priv->lock);
10898 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10899 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10901 mutex_init(&priv->mutex);
10902 if (pci_enable_device(pdev)) {
10903 err = -ENODEV;
10904 goto out_free_ieee80211;
10907 pci_set_master(pdev);
10909 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10910 if (!err)
10911 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
10912 if (err) {
10913 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
10914 goto out_pci_disable_device;
10917 pci_set_drvdata(pdev, priv);
10919 err = pci_request_regions(pdev, DRV_NAME);
10920 if (err)
10921 goto out_pci_disable_device;
10923 /* We disable the RETRY_TIMEOUT register (0x41) to keep
10924 * PCI Tx retries from interfering with C3 CPU state */
10925 pci_read_config_dword(pdev, 0x40, &val);
10926 if ((val & 0x0000ff00) != 0)
10927 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
10929 length = pci_resource_len(pdev, 0);
10930 priv->hw_len = length;
10932 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
10933 if (!base) {
10934 err = -ENODEV;
10935 goto out_pci_release_regions;
10938 priv->hw_base = base;
10939 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
10940 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
10942 err = ipw_setup_deferred_work(priv);
10943 if (err) {
10944 IPW_ERROR("Unable to setup deferred work\n");
10945 goto out_iounmap;
10948 ipw_sw_reset(priv, 1);
10950 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
10951 if (err) {
10952 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
10953 goto out_destroy_workqueue;
10956 SET_MODULE_OWNER(net_dev);
10957 SET_NETDEV_DEV(net_dev, &pdev->dev);
10959 mutex_lock(&priv->mutex);
10961 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
10962 priv->ieee->set_security = shim__set_security;
10963 priv->ieee->is_queue_full = ipw_net_is_queue_full;
10965 #ifdef CONFIG_IPW_QOS
10966 priv->ieee->handle_probe_response = ipw_handle_beacon;
10967 priv->ieee->handle_beacon = ipw_handle_probe_response;
10968 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
10969 #endif /* CONFIG_IPW_QOS */
10971 priv->ieee->perfect_rssi = -20;
10972 priv->ieee->worst_rssi = -85;
10974 net_dev->open = ipw_net_open;
10975 net_dev->stop = ipw_net_stop;
10976 net_dev->init = ipw_net_init;
10977 net_dev->get_stats = ipw_net_get_stats;
10978 net_dev->set_multicast_list = ipw_net_set_multicast_list;
10979 net_dev->set_mac_address = ipw_net_set_mac_address;
10980 priv->wireless_data.spy_data = &priv->ieee->spy_data;
10981 net_dev->wireless_data = &priv->wireless_data;
10982 net_dev->wireless_handlers = &ipw_wx_handler_def;
10983 net_dev->ethtool_ops = &ipw_ethtool_ops;
10984 net_dev->irq = pdev->irq;
10985 net_dev->base_addr = (unsigned long)priv->hw_base;
10986 net_dev->mem_start = pci_resource_start(pdev, 0);
10987 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
10989 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
10990 if (err) {
10991 IPW_ERROR("failed to create sysfs device attributes\n");
10992 mutex_unlock(&priv->mutex);
10993 goto out_release_irq;
10996 mutex_unlock(&priv->mutex);
10997 err = register_netdev(net_dev);
10998 if (err) {
10999 IPW_ERROR("failed to register network device\n");
11000 goto out_remove_sysfs;
11003 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11004 "channels, %d 802.11a channels)\n",
11005 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11006 priv->ieee->geo.a_channels);
11008 return 0;
11010 out_remove_sysfs:
11011 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11012 out_release_irq:
11013 free_irq(pdev->irq, priv);
11014 out_destroy_workqueue:
11015 destroy_workqueue(priv->workqueue);
11016 priv->workqueue = NULL;
11017 out_iounmap:
11018 iounmap(priv->hw_base);
11019 out_pci_release_regions:
11020 pci_release_regions(pdev);
11021 out_pci_disable_device:
11022 pci_disable_device(pdev);
11023 pci_set_drvdata(pdev, NULL);
11024 out_free_ieee80211:
11025 free_ieee80211(priv->net_dev);
11026 out:
11027 return err;
11028 }
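/* PCI remove: mark EXIT_PENDING and take the device down under the mutex,
 * then tear everything down in roughly the reverse order of the probe
 * (sysfs, netdev, rx/tx queues, cmdlog, delayed work and workqueue, IBSS
 * MAC hash, error log, IRQ, mappings, and finally the cached firmware). */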
11030 static void ipw_pci_remove(struct pci_dev *pdev)
11032 struct ipw_priv *priv = pci_get_drvdata(pdev);
11033 struct list_head *p, *q;
11034 int i;
11036 if (!priv)
11037 return;
11039 mutex_lock(&priv->mutex);
11041 priv->status |= STATUS_EXIT_PENDING;
11042 ipw_down(priv);
11043 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11045 mutex_unlock(&priv->mutex);
11047 unregister_netdev(priv->net_dev);
11049 if (priv->rxq) {
11050 ipw_rx_queue_free(priv, priv->rxq);
11051 priv->rxq = NULL;
11053 ipw_tx_queue_free(priv);
11055 if (priv->cmdlog) {
11056 kfree(priv->cmdlog);
11057 priv->cmdlog = NULL;
11059 /* ipw_down will ensure that there is no more pending work
11060 * in the workqueue's, so we can safely remove them now. */
11061 cancel_delayed_work(&priv->adhoc_check);
11062 cancel_delayed_work(&priv->gather_stats);
11063 cancel_delayed_work(&priv->request_scan);
11064 cancel_delayed_work(&priv->rf_kill);
11065 cancel_delayed_work(&priv->scan_check);
11066 destroy_workqueue(priv->workqueue);
11067 priv->workqueue = NULL;
11069 /* Free MAC hash list for ADHOC */
11070 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11071 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11072 list_del(p);
11073 kfree(list_entry(p, struct ipw_ibss_seq, list));
11077 if (priv->error) {
11078 ipw_free_error_log(priv->error);
11079 priv->error = NULL;
11082 free_irq(pdev->irq, priv);
11083 iounmap(priv->hw_base);
11084 pci_release_regions(pdev);
11085 pci_disable_device(pdev);
11086 pci_set_drvdata(pdev, NULL);
11087 free_ieee80211(priv->net_dev);
11088 free_firmware();
11091 #ifdef CONFIG_PM
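/* Power management: suspend takes the interface fully down and detaches
 * the net_device before saving PCI state; resume restores PCI state,
 * re-applies the RETRY_TIMEOUT workaround and schedules priv->up to
 * reload the firmware rather than restarting synchronously. */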
11092 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11094 struct ipw_priv *priv = pci_get_drvdata(pdev);
11095 struct net_device *dev = priv->net_dev;
11097 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11099 /* Take down the device; powers it off, etc. */
11100 ipw_down(priv);
11102 /* Remove the PRESENT state of the device */
11103 netif_device_detach(dev);
11105 pci_save_state(pdev);
11106 pci_disable_device(pdev);
11107 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11109 return 0;
11112 static int ipw_pci_resume(struct pci_dev *pdev)
11114 struct ipw_priv *priv = pci_get_drvdata(pdev);
11115 struct net_device *dev = priv->net_dev;
11116 u32 val;
11118 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11120 pci_set_power_state(pdev, PCI_D0);
11121 pci_enable_device(pdev);
11122 pci_restore_state(pdev);
11125 * Suspend/Resume resets the PCI configuration space, so we have to
11126 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11127 * from interfering with C3 CPU state. pci_restore_state won't help
11128 * here since it only restores the first 64 bytes of the PCI config header.
11130 pci_read_config_dword(pdev, 0x40, &val);
11131 if ((val & 0x0000ff00) != 0)
11132 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11134 /* Set the device back into the PRESENT state; this will also wake
11135 * the queue if needed */
11136 netif_device_attach(dev);
11138 /* Bring the device back up */
11139 queue_work(priv->workqueue, &priv->up);
11141 return 0;
11143 #endif
11145 /* driver initialization stuff */
11146 static struct pci_driver ipw_driver = {
11147 .name = DRV_NAME,
11148 .id_table = card_ids,
11149 .probe = ipw_pci_probe,
11150 .remove = __devexit_p(ipw_pci_remove),
11151 #ifdef CONFIG_PM
11152 .suspend = ipw_pci_suspend,
11153 .resume = ipw_pci_resume,
11154 #endif
11155 };
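/* Module init/exit.  pci_module_init() appears to be the older name for
 * pci_register_driver() used by drivers of this era; the extra sysfs file
 * exposes the debug_level attribute at the driver (not device) level. */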
11157 static int __init ipw_init(void)
11159 int ret;
11161 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11162 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11164 ret = pci_module_init(&ipw_driver);
11165 if (ret) {
11166 IPW_ERROR("Unable to initialize PCI module\n");
11167 return ret;
11170 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11171 if (ret) {
11172 IPW_ERROR("Unable to create driver sysfs file\n");
11173 pci_unregister_driver(&ipw_driver);
11174 return ret;
11177 return ret;
11180 static void __exit ipw_exit(void)
11182 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11183 pci_unregister_driver(&ipw_driver);
11186 module_param(disable, int, 0444);
11187 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11189 module_param(associate, int, 0444);
11190 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11192 module_param(auto_create, int, 0444);
11193 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11195 module_param(led, int, 0444);
11196 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11198 #ifdef CONFIG_IPW2200_DEBUG
11199 module_param(debug, int, 0444);
11200 MODULE_PARM_DESC(debug, "debug output mask");
11201 #endif
11203 module_param(channel, int, 0444);
11204 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11206 #ifdef CONFIG_IPW_QOS
11207 module_param(qos_enable, int, 0444);
11208 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11210 module_param(qos_burst_enable, int, 0444);
11211 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11213 module_param(qos_no_ack_mask, int, 0444);
11214 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11216 module_param(burst_duration_CCK, int, 0444);
11217 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11219 module_param(burst_duration_OFDM, int, 0444);
11220 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11221 #endif /* CONFIG_IPW_QOS */
11223 #ifdef CONFIG_IPW2200_MONITOR
11224 module_param(mode, int, 0444);
11225 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11226 #else
11227 module_param(mode, int, 0444);
11228 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11229 #endif
11231 module_param(bt_coexist, int, 0444);
11232 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11234 module_param(hwcrypto, int, 0444);
11235 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11237 module_param(cmdlog, int, 0444);
11238 MODULE_PARM_DESC(cmdlog,
11239 "allocate a ring buffer for logging firmware commands");
11241 module_param(roaming, int, 0444);
11242 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
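/* Illustrative module load (parameter values are examples only, not
 * recommendations):
 *
 *   modprobe ipw2200 associate=1 auto_create=1 led=1 mode=0 cmdlog=64
 *
 * See the MODULE_PARM_DESC() strings above for the meaning and defaults
 * of each parameter. */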
11244 module_exit(ipw_exit);
11245 module_init(ipw_init);