[PATCH] ipw2200: batch non-user-requested scan result notifications
[linux-2.6/linux-2.6-openrd.git] / drivers / net / wireless / ipw2200.c
blob feb8fcbab2d5a35b4c81dbd449c64b5a0dd5a174
1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
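/* Worked example (annotation, not in the original source): when the driver is
 * built in-tree with CONFIG_IPW2200_DEBUG, CONFIG_IPW2200_MONITOR and
 * CONFIG_IPW2200_QOS enabled, the suffix macros above expand to "k", "d", "m"
 * and "q", so IPW2200_VERSION becomes "1.2.2kdmq"; disabled options simply
 * contribute an empty string. */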
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99 'a', 'b', 'g', '?'
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
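/* Annotation (not in the original source): the array above is indexed by
 * frame priority 0-7 (802.1d user priority in QoS mode), so for example
 * index 0 (best effort) maps to IPW_TX_QUEUE_1 while indices 6 and 7
 * (voice) both map to IPW_TX_QUEUE_4. */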
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
184 static void ipw_tx_queue_free(struct ipw_priv *);
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
202 int out, i, j, l;
203 char c;
205 out = snprintf(buf, count, "%08X", ofs);
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
224 out += snprintf(buf + out, count - out, "%c", c);
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
231 return out;
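/* Annotation (illustrative only): each call to snprint_line() renders up to
 * 16 bytes as one hex-dump line of the form
 *
 *	OOOOOOOO HH HH HH HH HH HH HH HH  HH HH HH HH HH HH HH HH   cccccccc cccccccc
 *
 * i.e. an 8-digit offset, two groups of eight hex bytes, then the same bytes
 * as ASCII in two groups of eight, with non-printable characters shown as '.'. */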
234 static void printk_buf(int level, const u8 * data, u32 len)
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
266 return total;
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308 #define ipw_write8(ipw, ofs, val) \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val)
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
374 _ipw_read_indirect(a, b, c, d);
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
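/* Annotation (sketch, not part of the original file): the first 4K of the
 * device's register/SRAM space is memory-mapped at priv->hw_base and reached
 * through the direct ipw_read{8,16,32}/ipw_write{8,16,32} helpers above,
 * while addresses above 4K go through the IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA
 * window via ipw_{read,write}_reg{8,16,32} and ipw_{read,write}_indirect.
 * For example, elsewhere in this file:
 *
 *	inta = ipw_read32(priv, IPW_INTA_RW);		// low 4K, direct
 *	led  = ipw_read_reg32(priv, IPW_EVENT_REG);	// above 4K, indirect
 */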
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
430 u32 value;
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
451 if (num <= 0) {
452 return;
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
488 if (num <= 0) {
489 return;
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
519 memcpy_toio((priv->hw_base + addr), buf, num);
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
552 unsigned long flags;
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
561 unsigned long flags;
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 static char *ipw_error_desc(u32 val)
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
609 u32 i;
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
635 static inline int ipw_is_init(struct ipw_priv *priv)
637 return (priv->status & STATUS_INIT) ? 1 : 0;
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
642 u32 addr, field_info, field_len, field_count, total_len;
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
660 * TABLE 0: Direct access to a table of 32 bit values
662 * This is a very simple table with the data directly
663 * read from the table
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer than "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
691 case IPW_ORD_TABLE_1_MASK:
693 * TABLE 1: Indirect access to a table of 32 bit values
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
721 case IPW_ORD_TABLE_2_MASK:
723 * TABLE 2: Indirect access to a table of variable sized values
725 * This table consists of six values, each containing
726 * - dword containing the starting offset of the data
727 * - dword containing the length in the first 16 bits
728 * and the count in the second 16 bits
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
743 /* get the second DW of statistics ;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
756 /* abort if not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
763 *len = total_len;
764 if (!total_len)
765 return 0;
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
779 return 0;
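/* Usage sketch (mirrors show_ucode_version() later in this file):
 *
 *	u32 val = 0, len = sizeof(u32);
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &val, &len))
 *		return 0;	// tables not ready or buffer too small
 */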
782 static void ipw_init_ordinals(struct ipw_priv *priv)
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
805 static u32 ipw_register_toggle(u32 reg)
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that need to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
830 static void ipw_led_link_on(struct ipw_priv *priv)
832 unsigned long flags;
833 u32 led;
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
840 spin_lock_irqsave(&priv->lock, flags);
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
848 led = ipw_register_toggle(led);
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
853 priv->status |= STATUS_LED_LINK_ON;
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
862 spin_unlock_irqrestore(&priv->lock, flags);
865 static void ipw_bg_led_link_on(struct work_struct *work)
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
874 static void ipw_led_link_off(struct ipw_priv *priv)
876 unsigned long flags;
877 u32 led;
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't toggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 return;
884 spin_lock_irqsave(&priv->lock, flags);
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
894 IPW_DEBUG_LED("Link LED Off\n");
896 priv->status &= ~STATUS_LED_LINK_ON;
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 LD_TIME_LINK_OFF);
907 spin_unlock_irqrestore(&priv->lock, flags);
910 static void ipw_bg_led_link_off(struct work_struct *work)
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
921 u32 led;
923 if (priv->config & CFG_NO_LED)
924 return;
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
933 led = ipw_register_toggle(led);
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
938 IPW_DEBUG_LED("Activity LED On\n");
940 priv->status |= STATUS_LED_ACT_ON;
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
961 #endif /* 0 */
963 static void ipw_led_activity_off(struct ipw_priv *priv)
965 unsigned long flags;
966 u32 led;
968 if (priv->config & CFG_NO_LED)
969 return;
971 spin_lock_irqsave(&priv->lock, flags);
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
977 led = ipw_register_toggle(led);
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
982 IPW_DEBUG_LED("Activity LED Off\n");
984 priv->status &= ~STATUS_LED_ACT_ON;
987 spin_unlock_irqrestore(&priv->lock, flags);
990 static void ipw_bg_led_activity_off(struct work_struct *work)
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
999 static void ipw_led_band_on(struct ipw_priv *priv)
1001 unsigned long flags;
1002 u32 led;
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 return;
1009 spin_lock_irqsave(&priv->lock, flags);
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 } else {
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1026 led = ipw_register_toggle(led);
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1031 spin_unlock_irqrestore(&priv->lock, flags);
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1036 unsigned long flags;
1037 u32 led;
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1043 spin_lock_irqsave(&priv->lock, flags);
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1049 led = ipw_register_toggle(led);
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1054 spin_unlock_irqrestore(&priv->lock, flags);
1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1059 ipw_led_link_on(priv);
1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1064 ipw_led_activity_off(priv);
1065 ipw_led_link_off(priv);
1068 static void ipw_led_link_up(struct ipw_priv *priv)
1070 /* Set the Link Led on for all nic types */
1071 ipw_led_link_on(priv);
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1083 static void ipw_led_init(struct ipw_priv *priv)
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controlling the debug level.
1149 * See the level definitions in ipw for details.
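/* Usage sketch (assumes the sysfs path named in the comment above; the exact
 * directory depends on how the driver registers with the PCI bus):
 *
 *	# cat /sys/bus/pci/drivers/ipw/debug_level
 *	# echo 0x1 > /sys/bus/pci/drivers/ipw/debug_level
 */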
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1159 char *p = (char *)buf;
1160 u32 val;
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1175 return strnlen(buf, count);
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1190 u32 base;
1192 if (log_len) {
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 u32 elem_len = ipw_read_reg32(priv, base);
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 if (!error) {
1210 IPW_ERROR("Memory allocation for firmware error log "
1211 "failed.\n");
1212 return NULL;
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1222 ipw_capture_event_log(priv, log_len, error->log);
1224 if (elem_len)
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
1228 return error;
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 struct ipw_event log[log_len];
1237 u32 len = 0, i;
1239 ipw_capture_event_log(priv, log_len, log);
1241 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1242 for (i = 0; i < log_len; i++)
1243 len += snprintf(buf + len, PAGE_SIZE - len,
1244 "\n%08X%08X%08X",
1245 log[i].time, log[i].event, log[i].data);
1246 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1247 return len;
1250 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1252 static ssize_t show_error(struct device *d,
1253 struct device_attribute *attr, char *buf)
1255 struct ipw_priv *priv = dev_get_drvdata(d);
1256 u32 len = 0, i;
1257 if (!priv->error)
1258 return 0;
1259 len += snprintf(buf + len, PAGE_SIZE - len,
1260 "%08lX%08X%08X%08X",
1261 priv->error->jiffies,
1262 priv->error->status,
1263 priv->error->config, priv->error->elem_len);
1264 for (i = 0; i < priv->error->elem_len; i++)
1265 len += snprintf(buf + len, PAGE_SIZE - len,
1266 "\n%08X%08X%08X%08X%08X%08X%08X",
1267 priv->error->elem[i].time,
1268 priv->error->elem[i].desc,
1269 priv->error->elem[i].blink1,
1270 priv->error->elem[i].blink2,
1271 priv->error->elem[i].link1,
1272 priv->error->elem[i].link2,
1273 priv->error->elem[i].data);
1275 len += snprintf(buf + len, PAGE_SIZE - len,
1276 "\n%08X", priv->error->log_len);
1277 for (i = 0; i < priv->error->log_len; i++)
1278 len += snprintf(buf + len, PAGE_SIZE - len,
1279 "\n%08X%08X%08X",
1280 priv->error->log[i].time,
1281 priv->error->log[i].event,
1282 priv->error->log[i].data);
1283 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1284 return len;
1287 static ssize_t clear_error(struct device *d,
1288 struct device_attribute *attr,
1289 const char *buf, size_t count)
1291 struct ipw_priv *priv = dev_get_drvdata(d);
1293 kfree(priv->error);
1294 priv->error = NULL;
1295 return count;
1298 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1300 static ssize_t show_cmd_log(struct device *d,
1301 struct device_attribute *attr, char *buf)
1303 struct ipw_priv *priv = dev_get_drvdata(d);
1304 u32 len = 0, i;
1305 if (!priv->cmdlog)
1306 return 0;
1307 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1308 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1309 i = (i + 1) % priv->cmdlog_len) {
1310 len +=
1311 snprintf(buf + len, PAGE_SIZE - len,
1312 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1313 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1314 priv->cmdlog[i].cmd.len);
1315 len +=
1316 snprintk_buf(buf + len, PAGE_SIZE - len,
1317 (u8 *) priv->cmdlog[i].cmd.param,
1318 priv->cmdlog[i].cmd.len);
1319 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1321 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1322 return len;
1325 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1327 #ifdef CONFIG_IPW2200_PROMISCUOUS
1328 static void ipw_prom_free(struct ipw_priv *priv);
1329 static int ipw_prom_alloc(struct ipw_priv *priv);
1330 static ssize_t store_rtap_iface(struct device *d,
1331 struct device_attribute *attr,
1332 const char *buf, size_t count)
1334 struct ipw_priv *priv = dev_get_drvdata(d);
1335 int rc = 0;
1337 if (count < 1)
1338 return -EINVAL;
1340 switch (buf[0]) {
1341 case '0':
1342 if (!rtap_iface)
1343 return count;
1345 if (netif_running(priv->prom_net_dev)) {
1346 IPW_WARNING("Interface is up. Cannot unregister.\n");
1347 return count;
1350 ipw_prom_free(priv);
1351 rtap_iface = 0;
1352 break;
1354 case '1':
1355 if (rtap_iface)
1356 return count;
1358 rc = ipw_prom_alloc(priv);
1359 if (!rc)
1360 rtap_iface = 1;
1361 break;
1363 default:
1364 return -EINVAL;
1367 if (rc) {
1368 IPW_ERROR("Failed to register promiscuous network "
1369 "device (error %d).\n", rc);
1372 return count;
1375 static ssize_t show_rtap_iface(struct device *d,
1376 struct device_attribute *attr,
1377 char *buf)
1379 struct ipw_priv *priv = dev_get_drvdata(d);
1380 if (rtap_iface)
1381 return sprintf(buf, "%s", priv->prom_net_dev->name);
1382 else {
1383 buf[0] = '-';
1384 buf[1] = '1';
1385 buf[2] = '\0';
1386 return 3;
1390 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1391 store_rtap_iface);
1393 static ssize_t store_rtap_filter(struct device *d,
1394 struct device_attribute *attr,
1395 const char *buf, size_t count)
1397 struct ipw_priv *priv = dev_get_drvdata(d);
1399 if (!priv->prom_priv) {
1400 IPW_ERROR("Attempting to set filter without "
1401 "rtap_iface enabled.\n");
1402 return -EPERM;
1405 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1407 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1408 BIT_ARG16(priv->prom_priv->filter));
1410 return count;
1413 static ssize_t show_rtap_filter(struct device *d,
1414 struct device_attribute *attr,
1415 char *buf)
1417 struct ipw_priv *priv = dev_get_drvdata(d);
1418 return sprintf(buf, "0x%04X",
1419 priv->prom_priv ? priv->prom_priv->filter : 0);
1422 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1423 store_rtap_filter);
1424 #endif
1426 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1427 char *buf)
1429 struct ipw_priv *priv = dev_get_drvdata(d);
1430 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1433 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1434 const char *buf, size_t count)
1436 struct ipw_priv *priv = dev_get_drvdata(d);
1437 struct net_device *dev = priv->net_dev;
1438 char buffer[] = "00000000";
1439 unsigned long len =
1440 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1441 unsigned long val;
1442 char *p = buffer;
1444 IPW_DEBUG_INFO("enter\n");
1446 strncpy(buffer, buf, len);
1447 buffer[len] = 0;
1449 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1450 p++;
1451 if (p[0] == 'x' || p[0] == 'X')
1452 p++;
1453 val = simple_strtoul(p, &p, 16);
1454 } else
1455 val = simple_strtoul(p, &p, 10);
1456 if (p == buffer) {
1457 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1458 } else {
1459 priv->ieee->scan_age = val;
1460 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1463 IPW_DEBUG_INFO("exit\n");
1464 return len;
1467 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1469 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1470 char *buf)
1472 struct ipw_priv *priv = dev_get_drvdata(d);
1473 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1476 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1477 const char *buf, size_t count)
1479 struct ipw_priv *priv = dev_get_drvdata(d);
1481 IPW_DEBUG_INFO("enter\n");
1483 if (count == 0)
1484 return 0;
1486 if (*buf == 0) {
1487 IPW_DEBUG_LED("Disabling LED control.\n");
1488 priv->config |= CFG_NO_LED;
1489 ipw_led_shutdown(priv);
1490 } else {
1491 IPW_DEBUG_LED("Enabling LED control.\n");
1492 priv->config &= ~CFG_NO_LED;
1493 ipw_led_init(priv);
1496 IPW_DEBUG_INFO("exit\n");
1497 return count;
1500 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1502 static ssize_t show_status(struct device *d,
1503 struct device_attribute *attr, char *buf)
1505 struct ipw_priv *p = d->driver_data;
1506 return sprintf(buf, "0x%08x\n", (int)p->status);
1509 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1511 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1512 char *buf)
1514 struct ipw_priv *p = d->driver_data;
1515 return sprintf(buf, "0x%08x\n", (int)p->config);
1518 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1520 static ssize_t show_nic_type(struct device *d,
1521 struct device_attribute *attr, char *buf)
1523 struct ipw_priv *priv = d->driver_data;
1524 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1527 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1529 static ssize_t show_ucode_version(struct device *d,
1530 struct device_attribute *attr, char *buf)
1532 u32 len = sizeof(u32), tmp = 0;
1533 struct ipw_priv *p = d->driver_data;
1535 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1536 return 0;
1538 return sprintf(buf, "0x%08x\n", tmp);
1541 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1543 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1544 char *buf)
1546 u32 len = sizeof(u32), tmp = 0;
1547 struct ipw_priv *p = d->driver_data;
1549 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1550 return 0;
1552 return sprintf(buf, "0x%08x\n", tmp);
1555 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1558 * Add a device attribute to view/control the delay between eeprom
1559 * operations.
1561 static ssize_t show_eeprom_delay(struct device *d,
1562 struct device_attribute *attr, char *buf)
1564 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1565 return sprintf(buf, "%i\n", n);
1567 static ssize_t store_eeprom_delay(struct device *d,
1568 struct device_attribute *attr,
1569 const char *buf, size_t count)
1571 struct ipw_priv *p = d->driver_data;
1572 sscanf(buf, "%i", &p->eeprom_delay);
1573 return strnlen(buf, count);
1576 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1577 show_eeprom_delay, store_eeprom_delay);
1579 static ssize_t show_command_event_reg(struct device *d,
1580 struct device_attribute *attr, char *buf)
1582 u32 reg = 0;
1583 struct ipw_priv *p = d->driver_data;
1585 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1586 return sprintf(buf, "0x%08x\n", reg);
1588 static ssize_t store_command_event_reg(struct device *d,
1589 struct device_attribute *attr,
1590 const char *buf, size_t count)
1592 u32 reg;
1593 struct ipw_priv *p = d->driver_data;
1595 sscanf(buf, "%x", &reg);
1596 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1597 return strnlen(buf, count);
1600 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1601 show_command_event_reg, store_command_event_reg);
1603 static ssize_t show_mem_gpio_reg(struct device *d,
1604 struct device_attribute *attr, char *buf)
1606 u32 reg = 0;
1607 struct ipw_priv *p = d->driver_data;
1609 reg = ipw_read_reg32(p, 0x301100);
1610 return sprintf(buf, "0x%08x\n", reg);
1612 static ssize_t store_mem_gpio_reg(struct device *d,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1616 u32 reg;
1617 struct ipw_priv *p = d->driver_data;
1619 sscanf(buf, "%x", &reg);
1620 ipw_write_reg32(p, 0x301100, reg);
1621 return strnlen(buf, count);
1624 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1625 show_mem_gpio_reg, store_mem_gpio_reg);
1627 static ssize_t show_indirect_dword(struct device *d,
1628 struct device_attribute *attr, char *buf)
1630 u32 reg = 0;
1631 struct ipw_priv *priv = d->driver_data;
1633 if (priv->status & STATUS_INDIRECT_DWORD)
1634 reg = ipw_read_reg32(priv, priv->indirect_dword);
1635 else
1636 reg = 0;
1638 return sprintf(buf, "0x%08x\n", reg);
1640 static ssize_t store_indirect_dword(struct device *d,
1641 struct device_attribute *attr,
1642 const char *buf, size_t count)
1644 struct ipw_priv *priv = d->driver_data;
1646 sscanf(buf, "%x", &priv->indirect_dword);
1647 priv->status |= STATUS_INDIRECT_DWORD;
1648 return strnlen(buf, count);
1651 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1652 show_indirect_dword, store_indirect_dword);
1654 static ssize_t show_indirect_byte(struct device *d,
1655 struct device_attribute *attr, char *buf)
1657 u8 reg = 0;
1658 struct ipw_priv *priv = d->driver_data;
1660 if (priv->status & STATUS_INDIRECT_BYTE)
1661 reg = ipw_read_reg8(priv, priv->indirect_byte);
1662 else
1663 reg = 0;
1665 return sprintf(buf, "0x%02x\n", reg);
1667 static ssize_t store_indirect_byte(struct device *d,
1668 struct device_attribute *attr,
1669 const char *buf, size_t count)
1671 struct ipw_priv *priv = d->driver_data;
1673 sscanf(buf, "%x", &priv->indirect_byte);
1674 priv->status |= STATUS_INDIRECT_BYTE;
1675 return strnlen(buf, count);
1678 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1679 show_indirect_byte, store_indirect_byte);
1681 static ssize_t show_direct_dword(struct device *d,
1682 struct device_attribute *attr, char *buf)
1684 u32 reg = 0;
1685 struct ipw_priv *priv = d->driver_data;
1687 if (priv->status & STATUS_DIRECT_DWORD)
1688 reg = ipw_read32(priv, priv->direct_dword);
1689 else
1690 reg = 0;
1692 return sprintf(buf, "0x%08x\n", reg);
1694 static ssize_t store_direct_dword(struct device *d,
1695 struct device_attribute *attr,
1696 const char *buf, size_t count)
1698 struct ipw_priv *priv = d->driver_data;
1700 sscanf(buf, "%x", &priv->direct_dword);
1701 priv->status |= STATUS_DIRECT_DWORD;
1702 return strnlen(buf, count);
1705 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1706 show_direct_dword, store_direct_dword);
1708 static int rf_kill_active(struct ipw_priv *priv)
1710 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1711 priv->status |= STATUS_RF_KILL_HW;
1712 else
1713 priv->status &= ~STATUS_RF_KILL_HW;
1715 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1718 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1719 char *buf)
1721 /* 0 - RF kill not enabled
1722 1 - SW based RF kill active (sysfs)
1723 2 - HW based RF kill active
1724 3 - Both HW and SW based RF kill active */
1725 struct ipw_priv *priv = d->driver_data;
1726 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1727 (rf_kill_active(priv) ? 0x2 : 0x0);
1728 return sprintf(buf, "%i\n", val);
1731 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1733 if ((disable_radio ? 1 : 0) ==
1734 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1735 return 0;
1737 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1738 disable_radio ? "OFF" : "ON");
1740 if (disable_radio) {
1741 priv->status |= STATUS_RF_KILL_SW;
1743 if (priv->workqueue) {
1744 cancel_delayed_work(&priv->request_scan);
1745 cancel_delayed_work(&priv->scan_event);
1747 queue_work(priv->workqueue, &priv->down);
1748 } else {
1749 priv->status &= ~STATUS_RF_KILL_SW;
1750 if (rf_kill_active(priv)) {
1751 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1752 "disabled by HW switch\n");
1753 /* Make sure the RF_KILL check timer is running */
1754 cancel_delayed_work(&priv->rf_kill);
1755 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1756 round_jiffies(2 * HZ));
1757 } else
1758 queue_work(priv->workqueue, &priv->up);
1761 return 1;
1764 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1765 const char *buf, size_t count)
1767 struct ipw_priv *priv = d->driver_data;
1769 ipw_radio_kill_sw(priv, buf[0] == '1');
1771 return count;
1774 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1776 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1777 char *buf)
1779 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1780 int pos = 0, len = 0;
1781 if (priv->config & CFG_SPEED_SCAN) {
1782 while (priv->speed_scan[pos] != 0)
1783 len += sprintf(&buf[len], "%d ",
1784 priv->speed_scan[pos++]);
1785 return len + sprintf(&buf[len], "\n");
1788 return sprintf(buf, "0\n");
1791 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1792 const char *buf, size_t count)
1794 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1795 int channel, pos = 0;
1796 const char *p = buf;
1798 /* list of space separated channels to scan, optionally ending with 0 */
1799 while ((channel = simple_strtol(p, NULL, 0))) {
1800 if (pos == MAX_SPEED_SCAN - 1) {
1801 priv->speed_scan[pos] = 0;
1802 break;
1805 if (ieee80211_is_valid_channel(priv->ieee, channel))
1806 priv->speed_scan[pos++] = channel;
1807 else
1808 IPW_WARNING("Skipping invalid channel request: %d\n",
1809 channel);
1810 p = strchr(p, ' ');
1811 if (!p)
1812 break;
1813 while (*p == ' ' || *p == '\t')
1814 p++;
1817 if (pos == 0)
1818 priv->config &= ~CFG_SPEED_SCAN;
1819 else {
1820 priv->speed_scan_pos = 0;
1821 priv->config |= CFG_SPEED_SCAN;
1824 return count;
1827 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1828 store_speed_scan);
1830 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1831 char *buf)
1833 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1834 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1837 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1838 const char *buf, size_t count)
1840 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1841 if (buf[0] == '1')
1842 priv->config |= CFG_NET_STATS;
1843 else
1844 priv->config &= ~CFG_NET_STATS;
1846 return count;
1849 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1850 show_net_stats, store_net_stats);
1852 static ssize_t show_channels(struct device *d,
1853 struct device_attribute *attr,
1854 char *buf)
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1858 int len = 0, i;
1860 len = sprintf(&buf[len],
1861 "Displaying %d channels in 2.4GHz band "
1862 "(802.11bg):\n", geo->bg_channels);
1864 for (i = 0; i < geo->bg_channels; i++) {
1865 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1866 geo->bg[i].channel,
1867 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1868 " (radar spectrum)" : "",
1869 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1870 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1871 ? "" : ", IBSS",
1872 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1873 "passive only" : "active/passive",
1874 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1875 "B" : "B/G");
1878 len += sprintf(&buf[len],
1879 "Displaying %d channels in 5.2GHz band "
1880 "(802.11a):\n", geo->a_channels);
1881 for (i = 0; i < geo->a_channels; i++) {
1882 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1883 geo->a[i].channel,
1884 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1885 " (radar spectrum)" : "",
1886 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1887 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1888 ? "" : ", IBSS",
1889 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1890 "passive only" : "active/passive");
1893 return len;
1896 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1898 static void notify_wx_assoc_event(struct ipw_priv *priv)
1900 union iwreq_data wrqu;
1901 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1902 if (priv->status & STATUS_ASSOCIATED)
1903 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1904 else
1905 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1906 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1909 static void ipw_irq_tasklet(struct ipw_priv *priv)
1911 u32 inta, inta_mask, handled = 0;
1912 unsigned long flags;
1913 int rc = 0;
1915 spin_lock_irqsave(&priv->irq_lock, flags);
1917 inta = ipw_read32(priv, IPW_INTA_RW);
1918 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1919 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1921 /* Add any cached INTA values that need to be handled */
1922 inta |= priv->isr_inta;
1924 spin_unlock_irqrestore(&priv->irq_lock, flags);
1926 spin_lock_irqsave(&priv->lock, flags);
1928 /* handle all the justifications for the interrupt */
1929 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1930 ipw_rx(priv);
1931 handled |= IPW_INTA_BIT_RX_TRANSFER;
1934 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1935 IPW_DEBUG_HC("Command completed.\n");
1936 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1937 priv->status &= ~STATUS_HCMD_ACTIVE;
1938 wake_up_interruptible(&priv->wait_command_queue);
1939 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1942 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1943 IPW_DEBUG_TX("TX_QUEUE_1\n");
1944 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1945 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1948 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1949 IPW_DEBUG_TX("TX_QUEUE_2\n");
1950 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1951 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1954 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1955 IPW_DEBUG_TX("TX_QUEUE_3\n");
1956 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1957 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1960 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1961 IPW_DEBUG_TX("TX_QUEUE_4\n");
1962 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1963 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1966 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1967 IPW_WARNING("STATUS_CHANGE\n");
1968 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1971 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1972 IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1973 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1976 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1977 IPW_WARNING("HOST_CMD_DONE\n");
1978 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1981 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1982 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1983 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1986 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1987 IPW_WARNING("PHY_OFF_DONE\n");
1988 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1991 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1992 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1993 priv->status |= STATUS_RF_KILL_HW;
1994 wake_up_interruptible(&priv->wait_command_queue);
1995 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1996 cancel_delayed_work(&priv->request_scan);
1997 cancel_delayed_work(&priv->scan_event);
1998 schedule_work(&priv->link_down);
1999 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2000 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2003 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2004 IPW_WARNING("Firmware error detected. Restarting.\n");
2005 if (priv->error) {
2006 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2007 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2008 struct ipw_fw_error *error =
2009 ipw_alloc_error_log(priv);
2010 ipw_dump_error_log(priv, error);
2011 kfree(error);
2013 } else {
2014 priv->error = ipw_alloc_error_log(priv);
2015 if (priv->error)
2016 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2017 else
2018 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2019 "log.\n");
2020 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2021 ipw_dump_error_log(priv, priv->error);
2024 /* XXX: If hardware encryption is in use for WPA/WPA2,
2025 * we have to notify the supplicant. */
2026 if (priv->ieee->sec.encrypt) {
2027 priv->status &= ~STATUS_ASSOCIATED;
2028 notify_wx_assoc_event(priv);
2031 /* Keep the restart process from trying to send host
2032 * commands by clearing the INIT status bit */
2033 priv->status &= ~STATUS_INIT;
2035 /* Cancel currently queued command. */
2036 priv->status &= ~STATUS_HCMD_ACTIVE;
2037 wake_up_interruptible(&priv->wait_command_queue);
2039 queue_work(priv->workqueue, &priv->adapter_restart);
2040 handled |= IPW_INTA_BIT_FATAL_ERROR;
2043 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2044 IPW_ERROR("Parity error\n");
2045 handled |= IPW_INTA_BIT_PARITY_ERROR;
2048 if (handled != inta) {
2049 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2052 spin_unlock_irqrestore(&priv->lock, flags);
2054 /* enable all interrupts */
2055 ipw_enable_interrupts(priv);
2058 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2059 static char *get_cmd_string(u8 cmd)
2061 switch (cmd) {
2062 IPW_CMD(HOST_COMPLETE);
2063 IPW_CMD(POWER_DOWN);
2064 IPW_CMD(SYSTEM_CONFIG);
2065 IPW_CMD(MULTICAST_ADDRESS);
2066 IPW_CMD(SSID);
2067 IPW_CMD(ADAPTER_ADDRESS);
2068 IPW_CMD(PORT_TYPE);
2069 IPW_CMD(RTS_THRESHOLD);
2070 IPW_CMD(FRAG_THRESHOLD);
2071 IPW_CMD(POWER_MODE);
2072 IPW_CMD(WEP_KEY);
2073 IPW_CMD(TGI_TX_KEY);
2074 IPW_CMD(SCAN_REQUEST);
2075 IPW_CMD(SCAN_REQUEST_EXT);
2076 IPW_CMD(ASSOCIATE);
2077 IPW_CMD(SUPPORTED_RATES);
2078 IPW_CMD(SCAN_ABORT);
2079 IPW_CMD(TX_FLUSH);
2080 IPW_CMD(QOS_PARAMETERS);
2081 IPW_CMD(DINO_CONFIG);
2082 IPW_CMD(RSN_CAPABILITIES);
2083 IPW_CMD(RX_KEY);
2084 IPW_CMD(CARD_DISABLE);
2085 IPW_CMD(SEED_NUMBER);
2086 IPW_CMD(TX_POWER);
2087 IPW_CMD(COUNTRY_INFO);
2088 IPW_CMD(AIRONET_INFO);
2089 IPW_CMD(AP_TX_POWER);
2090 IPW_CMD(CCKM_INFO);
2091 IPW_CMD(CCX_VER_INFO);
2092 IPW_CMD(SET_CALIBRATION);
2093 IPW_CMD(SENSITIVITY_CALIB);
2094 IPW_CMD(RETRY_LIMIT);
2095 IPW_CMD(IPW_PRE_POWER_DOWN);
2096 IPW_CMD(VAP_BEACON_TEMPLATE);
2097 IPW_CMD(VAP_DTIM_PERIOD);
2098 IPW_CMD(EXT_SUPPORTED_RATES);
2099 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2100 IPW_CMD(VAP_QUIET_INTERVALS);
2101 IPW_CMD(VAP_CHANNEL_SWITCH);
2102 IPW_CMD(VAP_MANDATORY_CHANNELS);
2103 IPW_CMD(VAP_CELL_PWR_LIMIT);
2104 IPW_CMD(VAP_CF_PARAM_SET);
2105 IPW_CMD(VAP_SET_BEACONING_STATE);
2106 IPW_CMD(MEASUREMENT);
2107 IPW_CMD(POWER_CAPABILITY);
2108 IPW_CMD(SUPPORTED_CHANNELS);
2109 IPW_CMD(TPC_REPORT);
2110 IPW_CMD(WME_INFO);
2111 IPW_CMD(PRODUCTION_COMMAND);
2112 default:
2113 return "UNKNOWN";
2117 #define HOST_COMPLETE_TIMEOUT HZ
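/*
 * Host command flow: __ipw_send_cmd() marks STATUS_HCMD_ACTIVE, queues the
 * command on the command Tx queue and sleeps on wait_command_queue for up
 * to HOST_COMPLETE_TIMEOUT.  The bit is cleared and the waiter woken from
 * the IRQ tasklet when the TX_CMD_QUEUE interrupt indicates the firmware
 * has consumed the command; a timeout or RF-kill turns into -EIO.
 */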
2119 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2121 int rc = 0;
2122 unsigned long flags;
2124 spin_lock_irqsave(&priv->lock, flags);
2125 if (priv->status & STATUS_HCMD_ACTIVE) {
2126 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2127 get_cmd_string(cmd->cmd));
2128 spin_unlock_irqrestore(&priv->lock, flags);
2129 return -EAGAIN;
2132 priv->status |= STATUS_HCMD_ACTIVE;
2134 if (priv->cmdlog) {
2135 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2136 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2137 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2138 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2139 cmd->len);
2140 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2143 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2144 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2145 priv->status);
2147 #ifndef DEBUG_CMD_WEP_KEY
2148 if (cmd->cmd == IPW_CMD_WEP_KEY)
2149 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2150 else
2151 #endif
2152 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2154 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2155 if (rc) {
2156 priv->status &= ~STATUS_HCMD_ACTIVE;
2157 IPW_ERROR("Failed to send %s: Reason %d\n",
2158 get_cmd_string(cmd->cmd), rc);
2159 spin_unlock_irqrestore(&priv->lock, flags);
2160 goto exit;
2162 spin_unlock_irqrestore(&priv->lock, flags);
2164 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2165 !(priv->
2166 status & STATUS_HCMD_ACTIVE),
2167 HOST_COMPLETE_TIMEOUT);
2168 if (rc == 0) {
2169 spin_lock_irqsave(&priv->lock, flags);
2170 if (priv->status & STATUS_HCMD_ACTIVE) {
2171 IPW_ERROR("Failed to send %s: Command timed out.\n",
2172 get_cmd_string(cmd->cmd));
2173 priv->status &= ~STATUS_HCMD_ACTIVE;
2174 spin_unlock_irqrestore(&priv->lock, flags);
2175 rc = -EIO;
2176 goto exit;
2178 spin_unlock_irqrestore(&priv->lock, flags);
2179 } else
2180 rc = 0;
2182 if (priv->status & STATUS_RF_KILL_HW) {
2183 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2184 get_cmd_string(cmd->cmd));
2185 rc = -EIO;
2186 goto exit;
2189 exit:
2190 if (priv->cmdlog) {
2191 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2192 priv->cmdlog_pos %= priv->cmdlog_len;
2194 return rc;
2197 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2199 struct host_cmd cmd = {
2200 .cmd = command,
2203 return __ipw_send_cmd(priv, &cmd);
2206 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2207 void *data)
2209 struct host_cmd cmd = {
2210 .cmd = command,
2211 .len = len,
2212 .param = data,
2215 return __ipw_send_cmd(priv, &cmd);
2218 static int ipw_send_host_complete(struct ipw_priv *priv)
2220 if (!priv) {
2221 IPW_ERROR("Invalid args\n");
2222 return -1;
2225 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2228 static int ipw_send_system_config(struct ipw_priv *priv)
2230 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2231 sizeof(priv->sys_config),
2232 &priv->sys_config);
2235 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2237 if (!priv || !ssid) {
2238 IPW_ERROR("Invalid args\n");
2239 return -1;
2242 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2243 ssid);
2246 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2248 if (!priv || !mac) {
2249 IPW_ERROR("Invalid args\n");
2250 return -1;
2253 IPW_DEBUG_INFO("%s: Setting MAC to %s\n",
2254 priv->net_dev->name, print_mac(mac, mac));
2256 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2260 * NOTE: This must be executed from our workqueue as it results in udelay
2261 * being called, which may corrupt the keyboard if executed on the default
2262 * workqueue
2264 static void ipw_adapter_restart(void *adapter)
2266 struct ipw_priv *priv = adapter;
2268 if (priv->status & STATUS_RF_KILL_MASK)
2269 return;
2271 ipw_down(priv);
2273 if (priv->assoc_network &&
2274 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2275 ipw_remove_current_network(priv);
2277 if (ipw_up(priv)) {
2278 IPW_ERROR("Failed to up device\n");
2279 return;
2283 static void ipw_bg_adapter_restart(struct work_struct *work)
2285 struct ipw_priv *priv =
2286 container_of(work, struct ipw_priv, adapter_restart);
2287 mutex_lock(&priv->mutex);
2288 ipw_adapter_restart(priv);
2289 mutex_unlock(&priv->mutex);
2292 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2294 static void ipw_scan_check(void *data)
2296 struct ipw_priv *priv = data;
2297 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2298 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2299 "adapter after (%dms).\n",
2300 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2301 queue_work(priv->workqueue, &priv->adapter_restart);
2305 static void ipw_bg_scan_check(struct work_struct *work)
2307 struct ipw_priv *priv =
2308 container_of(work, struct ipw_priv, scan_check.work);
2309 mutex_lock(&priv->mutex);
2310 ipw_scan_check(priv);
2311 mutex_unlock(&priv->mutex);
2314 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2315 struct ipw_scan_request_ext *request)
2317 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2318 sizeof(*request), request);
2321 static int ipw_send_scan_abort(struct ipw_priv *priv)
2323 if (!priv) {
2324 IPW_ERROR("Invalid args\n");
2325 return -1;
2328 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2331 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2333 struct ipw_sensitivity_calib calib = {
2334 .beacon_rssi_raw = cpu_to_le16(sens),
2337 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2338 &calib);
2341 static int ipw_send_associate(struct ipw_priv *priv,
2342 struct ipw_associate *associate)
2344 struct ipw_associate tmp_associate;
2346 if (!priv || !associate) {
2347 IPW_ERROR("Invalid args\n");
2348 return -1;
2351 memcpy(&tmp_associate, associate, sizeof(*associate));
2352 tmp_associate.policy_support =
2353 cpu_to_le16(tmp_associate.policy_support);
2354 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2355 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2356 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2357 tmp_associate.listen_interval =
2358 cpu_to_le16(tmp_associate.listen_interval);
2359 tmp_associate.beacon_interval =
2360 cpu_to_le16(tmp_associate.beacon_interval);
2361 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2363 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2364 &tmp_associate);
2367 static int ipw_send_supported_rates(struct ipw_priv *priv,
2368 struct ipw_supported_rates *rates)
2370 if (!priv || !rates) {
2371 IPW_ERROR("Invalid args\n");
2372 return -1;
2375 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2376 rates);
2379 static int ipw_set_random_seed(struct ipw_priv *priv)
2381 u32 val;
2383 if (!priv) {
2384 IPW_ERROR("Invalid args\n");
2385 return -1;
2388 get_random_bytes(&val, sizeof(val));
2390 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2393 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2395 if (!priv) {
2396 IPW_ERROR("Invalid args\n");
2397 return -1;
2400 phy_off = cpu_to_le32(phy_off);
2401 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2402 &phy_off);
2405 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2407 if (!priv || !power) {
2408 IPW_ERROR("Invalid args\n");
2409 return -1;
2412 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2415 static int ipw_set_tx_power(struct ipw_priv *priv)
2417 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2418 struct ipw_tx_power tx_power;
2419 s8 max_power;
2420 int i;
2422 memset(&tx_power, 0, sizeof(tx_power));
2424 /* configure device for 'G' band */
2425 tx_power.ieee_mode = IPW_G_MODE;
2426 tx_power.num_channels = geo->bg_channels;
2427 for (i = 0; i < geo->bg_channels; i++) {
2428 max_power = geo->bg[i].max_power;
2429 tx_power.channels_tx_power[i].channel_number =
2430 geo->bg[i].channel;
2431 tx_power.channels_tx_power[i].tx_power = max_power ?
2432 min(max_power, priv->tx_power) : priv->tx_power;
2434 if (ipw_send_tx_power(priv, &tx_power))
2435 return -EIO;
2437 /* configure device to also handle 'B' band */
2438 tx_power.ieee_mode = IPW_B_MODE;
2439 if (ipw_send_tx_power(priv, &tx_power))
2440 return -EIO;
2442 /* configure device to also handle 'A' band */
2443 if (priv->ieee->abg_true) {
2444 tx_power.ieee_mode = IPW_A_MODE;
2445 tx_power.num_channels = geo->a_channels;
2446 for (i = 0; i < tx_power.num_channels; i++) {
2447 max_power = geo->a[i].max_power;
2448 tx_power.channels_tx_power[i].channel_number =
2449 geo->a[i].channel;
2450 tx_power.channels_tx_power[i].tx_power = max_power ?
2451 min(max_power, priv->tx_power) : priv->tx_power;
2453 if (ipw_send_tx_power(priv, &tx_power))
2454 return -EIO;
2456 return 0;
2459 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2461 struct ipw_rts_threshold rts_threshold = {
2462 .rts_threshold = cpu_to_le16(rts),
2465 if (!priv) {
2466 IPW_ERROR("Invalid args\n");
2467 return -1;
2470 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2471 sizeof(rts_threshold), &rts_threshold);
2474 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2476 struct ipw_frag_threshold frag_threshold = {
2477 .frag_threshold = cpu_to_le16(frag),
2480 if (!priv) {
2481 IPW_ERROR("Invalid args\n");
2482 return -1;
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2486 sizeof(frag_threshold), &frag_threshold);
2489 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2491 u32 param;
2493 if (!priv) {
2494 IPW_ERROR("Invalid args\n");
2495 return -1;
2498 /* If on battery, use power index 3; if on AC, use CAM;
2499 * otherwise use the user-requested level */
2500 switch (mode) {
2501 case IPW_POWER_BATTERY:
2502 param = IPW_POWER_INDEX_3;
2503 break;
2504 case IPW_POWER_AC:
2505 param = IPW_POWER_MODE_CAM;
2506 break;
2507 default:
2508 param = mode;
2509 break;
2512 param = cpu_to_le32(param);
2513 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2514 &param);
2517 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2519 struct ipw_retry_limit retry_limit = {
2520 .short_retry_limit = slimit,
2521 .long_retry_limit = llimit
2524 if (!priv) {
2525 IPW_ERROR("Invalid args\n");
2526 return -1;
2529 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2530 &retry_limit);
2534 * The IPW device contains a Microwire compatible EEPROM that stores
2535 * various data like the MAC address. Usually the firmware has exclusive
2536 * access to the eeprom, but during device initialization (before the
2537 * device driver has sent the HostComplete command to the firmware) the
2538 * device driver has read access to the EEPROM by way of indirect addressing
2539 * through a couple of memory mapped registers.
2541 * The following is a simplified implementation for pulling data out of
2542 * the eeprom, along with some helper functions to find information in
2543 * the per device private data's copy of the eeprom.
2545 * NOTE: To better understand how these functions work (i.e. what is a chip
2546 * select and why do we have to keep driving the eeprom clock?), read
2547 * just about any data sheet for a Microwire compatible EEPROM.
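/*
 * Illustrative sketch (not part of the driver): once the helpers below are
 * in place, fetching a word from the EEPROM is just a matter of clocking a
 * READ opcode plus address out on DI and sampling DO sixteen times, which
 * eeprom_read_u16() does for us.  A hypothetical debug helper (the name is
 * made up; the #if 0 keeps it out of the build) could use it like this:
 */
#if 0	/* example only -- never compiled */
static void example_peek_eeprom_word(struct ipw_priv *priv, u8 word_addr)
{
	/* drives CS/SK/DI and samples DO via the indirect accessor register */
	u16 raw = eeprom_read_u16(priv, word_addr);

	IPW_DEBUG_INFO("eeprom[0x%02x] = 0x%04x\n", word_addr, raw);
}
#endif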
2550 /* write a 32 bit value into the indirect accessor register */
2551 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2553 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2555 /* the eeprom requires some time to complete the operation */
2556 udelay(p->eeprom_delay);
2558 return;
2561 /* perform a chip select operation */
2562 static void eeprom_cs(struct ipw_priv *priv)
2564 eeprom_write_reg(priv, 0);
2565 eeprom_write_reg(priv, EEPROM_BIT_CS);
2566 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2567 eeprom_write_reg(priv, EEPROM_BIT_CS);
2570 /* release the eeprom chip select */
2571 static void eeprom_disable_cs(struct ipw_priv *priv)
2573 eeprom_write_reg(priv, EEPROM_BIT_CS);
2574 eeprom_write_reg(priv, 0);
2575 eeprom_write_reg(priv, EEPROM_BIT_SK);
2578 /* push a single bit down to the eeprom */
2579 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2581 int d = (bit ? EEPROM_BIT_DI : 0);
2582 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2583 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2586 /* push an opcode followed by an address down to the eeprom */
2587 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2589 int i;
2591 eeprom_cs(priv);
2592 eeprom_write_bit(priv, 1);
2593 eeprom_write_bit(priv, op & 2);
2594 eeprom_write_bit(priv, op & 1);
2595 for (i = 7; i >= 0; i--) {
2596 eeprom_write_bit(priv, addr & (1 << i));
2600 /* pull 16 bits off the eeprom, one bit at a time */
2601 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2603 int i;
2604 u16 r = 0;
2606 /* Send READ Opcode */
2607 eeprom_op(priv, EEPROM_CMD_READ, addr);
2609 /* Send dummy bit */
2610 eeprom_write_reg(priv, EEPROM_BIT_CS);
2612 /* Read the 16-bit word off the eeprom one bit at a time */
2613 for (i = 0; i < 16; i++) {
2614 u32 data = 0;
2615 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2616 eeprom_write_reg(priv, EEPROM_BIT_CS);
2617 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2618 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2621 /* Send another dummy bit */
2622 eeprom_write_reg(priv, 0);
2623 eeprom_disable_cs(priv);
2625 return r;
2628 /* helper function for pulling the mac address out of the private */
2629 /* data's copy of the eeprom data */
2630 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2632 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2636 * Either the device driver (i.e. the host) or the firmware can
2637 * load eeprom data into the designated region in SRAM. If neither
2638 * happens then the FW will shut down with a fatal error.
2640 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2641 * word in the shared SRAM needs to be non-zero.
2643 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2645 int i;
2646 u16 *eeprom = (u16 *) priv->eeprom;
2648 IPW_DEBUG_TRACE(">>\n");
2650 /* read entire contents of eeprom into private buffer */
2651 for (i = 0; i < 128; i++)
2652 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2655 If the data looks correct, then write it to the designated
2656 region in SRAM.  Otherwise let the firmware know to perform the
2657 load on its own.
2659 if (priv->eeprom[EEPROM_VERSION] != 0) {
2660 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2662 /* write the eeprom data to sram */
2663 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2664 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2666 /* Do not load eeprom data on fatal error or suspend */
2667 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2668 } else {
2669 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2671 /* Load eeprom data on fatal error or suspend */
2672 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2675 IPW_DEBUG_TRACE("<<\n");
2678 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2680 count >>= 2;
2681 if (!count)
2682 return;
2683 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2684 while (count--)
2685 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2688 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2690 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2691 CB_NUMBER_OF_ELEMENTS_SMALL *
2692 sizeof(struct command_block));
2695 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2696 { /* start dma engine but no transfers yet */
2698 IPW_DEBUG_FW(">> : \n");
2700 /* Start the dma */
2701 ipw_fw_dma_reset_command_blocks(priv);
2703 /* Write CB base address */
2704 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2706 IPW_DEBUG_FW("<< : \n");
2707 return 0;
2710 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2712 u32 control = 0;
2714 IPW_DEBUG_FW(">> :\n");
2716 /* set the Stop and Abort bit */
2717 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2718 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2719 priv->sram_desc.last_cb_index = 0;
2721 IPW_DEBUG_FW("<< \n");
2724 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2725 struct command_block *cb)
2727 u32 address =
2728 IPW_SHARED_SRAM_DMA_CONTROL +
2729 (sizeof(struct command_block) * index);
2730 IPW_DEBUG_FW(">> :\n");
2732 ipw_write_indirect(priv, address, (u8 *) cb,
2733 (int)sizeof(struct command_block));
2735 IPW_DEBUG_FW("<< :\n");
2736 return 0;
2740 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2742 u32 control = 0;
2743 u32 index = 0;
2745 IPW_DEBUG_FW(">> :\n");
2747 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2748 ipw_fw_dma_write_command_block(priv, index,
2749 &priv->sram_desc.cb_list[index]);
2751 /* Enable the DMA in the CSR register */
2752 ipw_clear_bit(priv, IPW_RESET_REG,
2753 IPW_RESET_REG_MASTER_DISABLED |
2754 IPW_RESET_REG_STOP_MASTER);
2756 /* Set the Start bit. */
2757 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2758 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2760 IPW_DEBUG_FW("<< :\n");
2761 return 0;
2764 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2766 u32 address;
2767 u32 register_value = 0;
2768 u32 cb_fields_address = 0;
2770 IPW_DEBUG_FW(">> :\n");
2771 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2772 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2774 /* Read the DMA Control register */
2775 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2776 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2778 /* Print the CB values */
2779 cb_fields_address = address;
2780 register_value = ipw_read_reg32(priv, cb_fields_address);
2781 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2783 cb_fields_address += sizeof(u32);
2784 register_value = ipw_read_reg32(priv, cb_fields_address);
2785 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2787 cb_fields_address += sizeof(u32);
2788 register_value = ipw_read_reg32(priv, cb_fields_address);
2789 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2790 register_value);
2792 cb_fields_address += sizeof(u32);
2793 register_value = ipw_read_reg32(priv, cb_fields_address);
2794 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2796 IPW_DEBUG_FW("<< :\n");
2799 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2801 u32 current_cb_address = 0;
2802 u32 current_cb_index = 0;
2804 IPW_DEBUG_FW(">> :\n");
2805 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2807 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2808 sizeof(struct command_block);
2810 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2811 current_cb_index, current_cb_address);
2813 IPW_DEBUG_FW("<< :\n");
2814 return current_cb_index;
2818 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2819 u32 src_address,
2820 u32 dest_address,
2821 u32 length,
2822 int interrupt_enabled, int is_last)
2825 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2826 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2827 CB_DEST_SIZE_LONG;
2828 struct command_block *cb;
2829 u32 last_cb_element = 0;
2831 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2832 src_address, dest_address, length);
2834 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2835 return -1;
2837 last_cb_element = priv->sram_desc.last_cb_index;
2838 cb = &priv->sram_desc.cb_list[last_cb_element];
2839 priv->sram_desc.last_cb_index++;
2841 /* Calculate the new CB control word */
2842 if (interrupt_enabled)
2843 control |= CB_INT_ENABLED;
2845 if (is_last)
2846 control |= CB_LAST_VALID;
2848 control |= length;
2850 /* Calculate the CB Element's checksum value */
2851 cb->status = control ^ src_address ^ dest_address;
2853 /* Copy the Source and Destination addresses */
2854 cb->dest_addr = dest_address;
2855 cb->source_addr = src_address;
2857 /* Copy the Control Word last */
2858 cb->control = control;
2860 return 0;
2863 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2864 u32 src_phys, u32 dest_address, u32 length)
2866 u32 bytes_left = length;
2867 u32 src_offset = 0;
2868 u32 dest_offset = 0;
2869 int status = 0;
2870 IPW_DEBUG_FW(">> \n");
2871 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2872 src_phys, dest_address, length);
2873 while (bytes_left > CB_MAX_LENGTH) {
2874 status = ipw_fw_dma_add_command_block(priv,
2875 src_phys + src_offset,
2876 dest_address +
2877 dest_offset,
2878 CB_MAX_LENGTH, 0, 0);
2879 if (status) {
2880 IPW_DEBUG_FW_INFO(": Failed\n");
2881 return -1;
2882 } else
2883 IPW_DEBUG_FW_INFO(": Added new cb\n");
2885 src_offset += CB_MAX_LENGTH;
2886 dest_offset += CB_MAX_LENGTH;
2887 bytes_left -= CB_MAX_LENGTH;
2890 /* add the buffer tail */
2891 if (bytes_left > 0) {
2892 status =
2893 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2894 dest_address + dest_offset,
2895 bytes_left, 0, 0);
2896 if (status) {
2897 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2898 return -1;
2899 } else
2900 IPW_DEBUG_FW_INFO
2901 (": Adding new cb - the buffer tail\n");
2904 IPW_DEBUG_FW("<< \n");
2905 return 0;
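/*
 * Worked example for the chunking above (illustrative numbers): a firmware
 * chunk of 2.5 * CB_MAX_LENGTH bytes is split into two full command blocks
 * of CB_MAX_LENGTH each plus one tail block of CB_MAX_LENGTH / 2, i.e.
 * three of the CB_NUMBER_OF_ELEMENTS_SMALL descriptors in sram_desc are
 * consumed.
 */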
2908 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2910 u32 current_index = 0, previous_index;
2911 u32 watchdog = 0;
2913 IPW_DEBUG_FW(">> : \n");
2915 current_index = ipw_fw_dma_command_block_index(priv);
2916 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2917 (int)priv->sram_desc.last_cb_index);
2919 while (current_index < priv->sram_desc.last_cb_index) {
2920 udelay(50);
2921 previous_index = current_index;
2922 current_index = ipw_fw_dma_command_block_index(priv);
2924 if (previous_index < current_index) {
2925 watchdog = 0;
2926 continue;
2928 if (++watchdog > 400) {
2929 IPW_DEBUG_FW_INFO("Timeout\n");
2930 ipw_fw_dma_dump_command_block(priv);
2931 ipw_fw_dma_abort(priv);
2932 return -1;
2936 ipw_fw_dma_abort(priv);
2938 /* Disable the DMA in the CSR register */
2939 ipw_set_bit(priv, IPW_RESET_REG,
2940 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2942 IPW_DEBUG_FW("<< dmaWaitSync \n");
2943 return 0;
2946 static void ipw_remove_current_network(struct ipw_priv *priv)
2948 struct list_head *element, *safe;
2949 struct ieee80211_network *network = NULL;
2950 unsigned long flags;
2952 spin_lock_irqsave(&priv->ieee->lock, flags);
2953 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2954 network = list_entry(element, struct ieee80211_network, list);
2955 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2956 list_del(element);
2957 list_add_tail(&network->list,
2958 &priv->ieee->network_free_list);
2961 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2965 * Check that the card is still alive.
2966 * Reads the debug register from domain0.
2967 * If the card is present, a pre-defined value should
2968 * be found there.
2970 * @param priv
2971 * @return 1 if card is present, 0 otherwise
2973 static inline int ipw_alive(struct ipw_priv *priv)
2975 return ipw_read32(priv, 0x90) == 0xd55555d5;
2978 /* timeout in msec, attempted in 10-msec quanta */
2979 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2980 int timeout)
2982 int i = 0;
2984 do {
2985 if ((ipw_read32(priv, addr) & mask) == mask)
2986 return i;
2987 mdelay(10);
2988 i += 10;
2989 } while (i < timeout);
2991 return -ETIME;
2994 /* These functions load the firmware and microcode for the operation of
2995 * the ipw hardware.  They assume the buffer has all the bits for the
2996 * image and that the caller is handling the memory allocation and clean up.
2999 static int ipw_stop_master(struct ipw_priv *priv)
3001 int rc;
3003 IPW_DEBUG_TRACE(">> \n");
3004 /* stop master. typical delay - 0 */
3005 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3007 /* timeout is in msec, polled in 10-msec quanta */
3008 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3009 IPW_RESET_REG_MASTER_DISABLED, 100);
3010 if (rc < 0) {
3011 IPW_ERROR("wait for stop master failed after 100ms\n");
3012 return -1;
3015 IPW_DEBUG_INFO("stop master %dms\n", rc);
3017 return rc;
3020 static void ipw_arc_release(struct ipw_priv *priv)
3022 IPW_DEBUG_TRACE(">> \n");
3023 mdelay(5);
3025 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3027 /* no one knows the exact timing, so add some delay for safety */
3028 mdelay(5);
3031 struct fw_chunk {
3032 u32 address;
3033 u32 length;
3036 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3038 int rc = 0, i, addr;
3039 u8 cr = 0;
3040 u16 *image;
3042 image = (u16 *) data;
3044 IPW_DEBUG_TRACE(">> \n");
3046 rc = ipw_stop_master(priv);
3048 if (rc < 0)
3049 return rc;
3051 for (addr = IPW_SHARED_LOWER_BOUND;
3052 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3053 ipw_write32(priv, addr, 0);
3056 /* no ucode (yet) */
3057 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3058 /* destroy DMA queues */
3059 /* reset sequence */
3061 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3062 ipw_arc_release(priv);
3063 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3064 mdelay(1);
3066 /* reset PHY */
3067 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3068 mdelay(1);
3070 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3071 mdelay(1);
3073 /* enable ucode store */
3074 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3075 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3076 mdelay(1);
3078 /* write ucode */
3080 * @bug
3081 * Do NOT set indirect address register once and then
3082 * store data to indirect data register in the loop.
3083 * It seems very reasonable, but in this case DINO does not
3084 * accept the ucode. It is essential to set the address each time.
3086 /* load new ipw uCode */
3087 for (i = 0; i < len / 2; i++)
3088 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3089 cpu_to_le16(image[i]));
3091 /* enable DINO */
3092 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3093 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3095 /* this is where the igx / win driver deviates from the VAP driver. */
3097 /* wait for alive response */
3098 for (i = 0; i < 100; i++) {
3099 /* poll for incoming data */
3100 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3101 if (cr & DINO_RXFIFO_DATA)
3102 break;
3103 mdelay(1);
3106 if (cr & DINO_RXFIFO_DATA) {
3107 /* alive_command_response size is NOT a multiple of 4 */
3108 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3110 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3111 response_buffer[i] =
3112 le32_to_cpu(ipw_read_reg32(priv,
3113 IPW_BASEBAND_RX_FIFO_READ));
3114 memcpy(&priv->dino_alive, response_buffer,
3115 sizeof(priv->dino_alive));
3116 if (priv->dino_alive.alive_command == 1
3117 && priv->dino_alive.ucode_valid == 1) {
3118 rc = 0;
3119 IPW_DEBUG_INFO
3120 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3121 "of %02d/%02d/%02d %02d:%02d\n",
3122 priv->dino_alive.software_revision,
3123 priv->dino_alive.software_revision,
3124 priv->dino_alive.device_identifier,
3125 priv->dino_alive.device_identifier,
3126 priv->dino_alive.time_stamp[0],
3127 priv->dino_alive.time_stamp[1],
3128 priv->dino_alive.time_stamp[2],
3129 priv->dino_alive.time_stamp[3],
3130 priv->dino_alive.time_stamp[4]);
3131 } else {
3132 IPW_DEBUG_INFO("Microcode is not alive\n");
3133 rc = -EINVAL;
3135 } else {
3136 IPW_DEBUG_INFO("No alive response from DINO\n");
3137 rc = -ETIME;
3140 /* disable DINO, otherwise for some reason
3141 the firmware has problems getting the alive response */
3142 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3144 return rc;
3147 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3149 int rc = -1;
3150 int offset = 0;
3151 struct fw_chunk *chunk;
3152 dma_addr_t shared_phys;
3153 u8 *shared_virt;
3155 IPW_DEBUG_TRACE(">> : \n");
3156 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3158 if (!shared_virt)
3159 return -ENOMEM;
3161 memmove(shared_virt, data, len);
3163 /* Start the Dma */
3164 rc = ipw_fw_dma_enable(priv);
3166 if (priv->sram_desc.last_cb_index > 0) {
3167 /* the DMA is already ready; this would be a bug. */
3168 BUG();
3169 goto out;
3172 do {
3173 chunk = (struct fw_chunk *)(data + offset);
3174 offset += sizeof(struct fw_chunk);
3175 /* build DMA packet and queue up for sending */
3176 /* dma to chunk->address, the chunk->length bytes from data +
3177 * offset */
3178 /* Dma loading */
3179 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3180 le32_to_cpu(chunk->address),
3181 le32_to_cpu(chunk->length));
3182 if (rc) {
3183 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3184 goto out;
3187 offset += le32_to_cpu(chunk->length);
3188 } while (offset < len);
3190 /* Run the DMA and wait for the answer */
3191 rc = ipw_fw_dma_kick(priv);
3192 if (rc) {
3193 IPW_ERROR("dmaKick Failed\n");
3194 goto out;
3197 rc = ipw_fw_dma_wait(priv);
3198 if (rc) {
3199 IPW_ERROR("dmaWaitSync Failed\n");
3200 goto out;
3202 out:
3203 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3204 return rc;
3207 /* stop nic */
3208 static int ipw_stop_nic(struct ipw_priv *priv)
3210 int rc = 0;
3212 /* stop */
3213 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3215 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3216 IPW_RESET_REG_MASTER_DISABLED, 500);
3217 if (rc < 0) {
3218 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3219 return rc;
3222 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3224 return rc;
3227 static void ipw_start_nic(struct ipw_priv *priv)
3229 IPW_DEBUG_TRACE(">>\n");
3231 /* prvHwStartNic release ARC */
3232 ipw_clear_bit(priv, IPW_RESET_REG,
3233 IPW_RESET_REG_MASTER_DISABLED |
3234 IPW_RESET_REG_STOP_MASTER |
3235 CBD_RESET_REG_PRINCETON_RESET);
3237 /* enable power management */
3238 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3239 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3241 IPW_DEBUG_TRACE("<<\n");
3244 static int ipw_init_nic(struct ipw_priv *priv)
3246 int rc;
3248 IPW_DEBUG_TRACE(">>\n");
3249 /* reset */
3250 /* prvHwInitNic */
3251 /* set "initialization complete" bit to move adapter to D0 state */
3252 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3254 /* low-level PLL activation */
3255 ipw_write32(priv, IPW_READ_INT_REGISTER,
3256 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3258 /* wait for clock stabilization */
3259 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3260 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3261 if (rc < 0)
3262 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3264 /* assert SW reset */
3265 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3267 udelay(10);
3269 /* set "initialization complete" bit to move adapter to D0 state */
3270 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3272 IPW_DEBUG_TRACE("<<\n");
3273 return 0;
3276 /* Call this function from process context; it will sleep in request_firmware.
3277 * Probe is an ok place to call this from.
3279 static int ipw_reset_nic(struct ipw_priv *priv)
3281 int rc = 0;
3282 unsigned long flags;
3284 IPW_DEBUG_TRACE(">>\n");
3286 rc = ipw_init_nic(priv);
3288 spin_lock_irqsave(&priv->lock, flags);
3289 /* Clear the 'host command active' bit... */
3290 priv->status &= ~STATUS_HCMD_ACTIVE;
3291 wake_up_interruptible(&priv->wait_command_queue);
3292 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3293 wake_up_interruptible(&priv->wait_state);
3294 spin_unlock_irqrestore(&priv->lock, flags);
3296 IPW_DEBUG_TRACE("<<\n");
3297 return rc;
3301 struct ipw_fw {
3302 __le32 ver;
3303 __le32 boot_size;
3304 __le32 ucode_size;
3305 __le32 fw_size;
3306 u8 data[0];
3309 static int ipw_get_fw(struct ipw_priv *priv,
3310 const struct firmware **raw, const char *name)
3312 struct ipw_fw *fw;
3313 int rc;
3315 /* ask firmware_class module to get the boot firmware off disk */
3316 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3317 if (rc < 0) {
3318 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3319 return rc;
3322 if ((*raw)->size < sizeof(*fw)) {
3323 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3324 return -EINVAL;
3327 fw = (void *)(*raw)->data;
3329 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3330 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3331 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3332 name, (*raw)->size);
3333 return -EINVAL;
3336 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3337 name,
3338 le32_to_cpu(fw->ver) >> 16,
3339 le32_to_cpu(fw->ver) & 0xff,
3340 (*raw)->size - sizeof(*fw));
3341 return 0;
3344 #define IPW_RX_BUF_SIZE (3000)
3346 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3347 struct ipw_rx_queue *rxq)
3349 unsigned long flags;
3350 int i;
3352 spin_lock_irqsave(&rxq->lock, flags);
3354 INIT_LIST_HEAD(&rxq->rx_free);
3355 INIT_LIST_HEAD(&rxq->rx_used);
3357 /* Fill the rx_used queue with _all_ of the Rx buffers */
3358 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3359 /* In the reset function, these buffers may have been allocated
3360 * to an SKB, so we need to unmap and free potential storage */
3361 if (rxq->pool[i].skb != NULL) {
3362 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3363 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3364 dev_kfree_skb(rxq->pool[i].skb);
3365 rxq->pool[i].skb = NULL;
3367 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3370 /* Set us so that we have processed and used all buffers, but have
3371 * not restocked the Rx queue with fresh buffers */
3372 rxq->read = rxq->write = 0;
3373 rxq->processed = RX_QUEUE_SIZE - 1;
3374 rxq->free_count = 0;
3375 spin_unlock_irqrestore(&rxq->lock, flags);
3378 #ifdef CONFIG_PM
3379 static int fw_loaded = 0;
3380 static const struct firmware *raw = NULL;
3382 static void free_firmware(void)
3384 if (fw_loaded) {
3385 release_firmware(raw);
3386 raw = NULL;
3387 fw_loaded = 0;
3390 #else
3391 #define free_firmware() do {} while (0)
3392 #endif
3394 static int ipw_load(struct ipw_priv *priv)
3396 #ifndef CONFIG_PM
3397 const struct firmware *raw = NULL;
3398 #endif
3399 struct ipw_fw *fw;
3400 u8 *boot_img, *ucode_img, *fw_img;
3401 u8 *name = NULL;
3402 int rc = 0, retries = 3;
3404 switch (priv->ieee->iw_mode) {
3405 case IW_MODE_ADHOC:
3406 name = "ipw2200-ibss.fw";
3407 break;
3408 #ifdef CONFIG_IPW2200_MONITOR
3409 case IW_MODE_MONITOR:
3410 name = "ipw2200-sniffer.fw";
3411 break;
3412 #endif
3413 case IW_MODE_INFRA:
3414 name = "ipw2200-bss.fw";
3415 break;
3418 if (!name) {
3419 rc = -EINVAL;
3420 goto error;
3423 #ifdef CONFIG_PM
3424 if (!fw_loaded) {
3425 #endif
3426 rc = ipw_get_fw(priv, &raw, name);
3427 if (rc < 0)
3428 goto error;
3429 #ifdef CONFIG_PM
3431 #endif
3433 fw = (void *)raw->data;
3434 boot_img = &fw->data[0];
3435 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3436 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3437 le32_to_cpu(fw->ucode_size)];
3439 if (rc < 0)
3440 goto error;
3442 if (!priv->rxq)
3443 priv->rxq = ipw_rx_queue_alloc(priv);
3444 else
3445 ipw_rx_queue_reset(priv, priv->rxq);
3446 if (!priv->rxq) {
3447 IPW_ERROR("Unable to initialize Rx queue\n");
3448 goto error;
3451 retry:
3452 /* Ensure interrupts are disabled */
3453 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3454 priv->status &= ~STATUS_INT_ENABLED;
3456 /* ack pending interrupts */
3457 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3459 ipw_stop_nic(priv);
3461 rc = ipw_reset_nic(priv);
3462 if (rc < 0) {
3463 IPW_ERROR("Unable to reset NIC\n");
3464 goto error;
3467 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3468 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3470 /* DMA the initial boot firmware into the device */
3471 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3472 if (rc < 0) {
3473 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3474 goto error;
3477 /* kick start the device */
3478 ipw_start_nic(priv);
3480 /* wait for the device to finish its initial startup sequence */
3481 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3482 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3483 if (rc < 0) {
3484 IPW_ERROR("device failed to boot initial fw image\n");
3485 goto error;
3487 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3489 /* ack fw init done interrupt */
3490 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3492 /* DMA the ucode into the device */
3493 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3494 if (rc < 0) {
3495 IPW_ERROR("Unable to load ucode: %d\n", rc);
3496 goto error;
3499 /* stop nic */
3500 ipw_stop_nic(priv);
3502 /* DMA bss firmware into the device */
3503 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3504 if (rc < 0) {
3505 IPW_ERROR("Unable to load firmware: %d\n", rc);
3506 goto error;
3508 #ifdef CONFIG_PM
3509 fw_loaded = 1;
3510 #endif
3512 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3514 rc = ipw_queue_reset(priv);
3515 if (rc < 0) {
3516 IPW_ERROR("Unable to initialize queues\n");
3517 goto error;
3520 /* Ensure interrupts are disabled */
3521 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3522 /* ack pending interrupts */
3523 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3525 /* kick start the device */
3526 ipw_start_nic(priv);
3528 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3529 if (retries > 0) {
3530 IPW_WARNING("Parity error. Retrying init.\n");
3531 retries--;
3532 goto retry;
3535 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3536 rc = -EIO;
3537 goto error;
3540 /* wait for the device */
3541 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3542 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3543 if (rc < 0) {
3544 IPW_ERROR("device failed to start within 500ms\n");
3545 goto error;
3547 IPW_DEBUG_INFO("device response after %dms\n", rc);
3549 /* ack fw init done interrupt */
3550 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3552 /* read eeprom data and initialize the eeprom region of sram */
3553 priv->eeprom_delay = 1;
3554 ipw_eeprom_init_sram(priv);
3556 /* enable interrupts */
3557 ipw_enable_interrupts(priv);
3559 /* Ensure our queue has valid packets */
3560 ipw_rx_queue_replenish(priv);
3562 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3564 /* ack pending interrupts */
3565 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3567 #ifndef CONFIG_PM
3568 release_firmware(raw);
3569 #endif
3570 return 0;
3572 error:
3573 if (priv->rxq) {
3574 ipw_rx_queue_free(priv, priv->rxq);
3575 priv->rxq = NULL;
3577 ipw_tx_queue_free(priv);
3578 if (raw)
3579 release_firmware(raw);
3580 #ifdef CONFIG_PM
3581 fw_loaded = 0;
3582 raw = NULL;
3583 #endif
3585 return rc;
3589 * DMA services
3591 * Theory of operation
3593 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3594 * Two empty entries are always kept in the buffer to protect from overflow.
3596 * For the Tx queues there are low mark and high mark limits.  If, after
3597 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3598 * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
3599 * space becomes > high mark, the Tx queue is resumed.
3601 * The IPW operates with six queues, one receive queue in the device's
3602 * sram, one transmit queue for sending commands to the device firmware,
3603 * and four transmit queues for data.
3605 * The four transmit queues allow for performing quality of service (qos)
3606 * transmissions as per the 802.11 protocol. Currently Linux does not
3607 * provide a mechanism to the user for utilizing prioritized queues, so
3608 * we only utilize the first data transmit queue (queue1).
3612 * Driver allocates buffers of this size for Rx
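/*
 * Illustrative sketch of the low mark / high mark flow control described
 * above.  The driver's real stop/wake handling lives in the Tx and reclaim
 * paths later in the file; the helper below is a simplified, hypothetical
 * rendition built only on ipw_queue_space() (just below) and the
 * q->low_mark / q->high_mark values set up in ipw_queue_init().
 */
#if 0	/* example only -- never compiled */
static void example_tx_flow_control(struct ipw_priv *priv,
				    struct clx2_queue *q, int just_queued)
{
	int space = ipw_queue_space(q);

	if (just_queued && space < q->low_mark)
		netif_stop_queue(priv->net_dev);	/* ran low after queuing */
	else if (!just_queued && space > q->high_mark)
		netif_wake_queue(priv->net_dev);	/* reclaim freed room */
}
#endif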
3615 static inline int ipw_queue_space(const struct clx2_queue *q)
3617 int s = q->last_used - q->first_empty;
3618 if (s <= 0)
3619 s += q->n_bd;
3620 s -= 2; /* keep some reserve so we don't confuse empty with full */
3621 if (s < 0)
3622 s = 0;
3623 return s;
3626 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3628 return (++index == n_bd) ? 0 : index;
3632 * Initialize common DMA queue structure
3634 * @param q queue to init
3635 * @param count Number of BD's to allocate. Should be a power of 2
3636 * @param read_register Address for 'read' register
3637 * (not offset within BAR, full address)
3638 * @param write_register Address for 'write' register
3639 * (not offset within BAR, full address)
3640 * @param base_register Address for 'base' register
3641 * (not offset within BAR, full address)
3642 * @param size Address for 'size' register
3643 * (not offset within BAR, full address)
3645 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3646 int count, u32 read, u32 write, u32 base, u32 size)
3648 q->n_bd = count;
3650 q->low_mark = q->n_bd / 4;
3651 if (q->low_mark < 4)
3652 q->low_mark = 4;
3654 q->high_mark = q->n_bd / 8;
3655 if (q->high_mark < 2)
3656 q->high_mark = 2;
3658 q->first_empty = q->last_used = 0;
3659 q->reg_r = read;
3660 q->reg_w = write;
3662 ipw_write32(priv, base, q->dma_addr);
3663 ipw_write32(priv, size, count);
3664 ipw_write32(priv, read, 0);
3665 ipw_write32(priv, write, 0);
3667 _ipw_read32(priv, 0x90);
3670 static int ipw_queue_tx_init(struct ipw_priv *priv,
3671 struct clx2_tx_queue *q,
3672 int count, u32 read, u32 write, u32 base, u32 size)
3674 struct pci_dev *dev = priv->pci_dev;
3676 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3677 if (!q->txb) {
3678 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3679 return -ENOMEM;
3682 q->bd =
3683 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3684 if (!q->bd) {
3685 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3686 sizeof(q->bd[0]) * count);
3687 kfree(q->txb);
3688 q->txb = NULL;
3689 return -ENOMEM;
3692 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3693 return 0;
3697 * Free one TFD, the one at index [txq->q.last_used].
3698 * Do NOT advance any indexes
3700 * @param dev
3701 * @param txq
3703 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3704 struct clx2_tx_queue *txq)
3706 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3707 struct pci_dev *dev = priv->pci_dev;
3708 int i;
3710 /* classify bd */
3711 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3712 /* nothing to cleanup after for host commands */
3713 return;
3715 /* sanity check */
3716 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3717 IPW_ERROR("Too many chunks: %i\n",
3718 le32_to_cpu(bd->u.data.num_chunks));
3719 /** @todo issue fatal error, it is quite a serious situation */
3720 return;
3723 /* unmap chunks if any */
3724 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3725 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3726 le16_to_cpu(bd->u.data.chunk_len[i]),
3727 PCI_DMA_TODEVICE);
3728 if (txq->txb[txq->q.last_used]) {
3729 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3730 txq->txb[txq->q.last_used] = NULL;
3736 * Deallocate DMA queue.
3738 * Empty queue by removing and destroying all BD's.
3739 * Free all buffers.
3741 * @param dev
3742 * @param q
3744 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3746 struct clx2_queue *q = &txq->q;
3747 struct pci_dev *dev = priv->pci_dev;
3749 if (q->n_bd == 0)
3750 return;
3752 /* first, empty all BD's */
3753 for (; q->first_empty != q->last_used;
3754 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3755 ipw_queue_tx_free_tfd(priv, txq);
3758 /* free buffers belonging to queue itself */
3759 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3760 q->dma_addr);
3761 kfree(txq->txb);
3763 /* 0 fill whole structure */
3764 memset(txq, 0, sizeof(*txq));
3768 * Destroy all DMA queues and structures
3770 * @param priv
3772 static void ipw_tx_queue_free(struct ipw_priv *priv)
3774 /* Tx CMD queue */
3775 ipw_queue_tx_free(priv, &priv->txq_cmd);
3777 /* Tx queues */
3778 ipw_queue_tx_free(priv, &priv->txq[0]);
3779 ipw_queue_tx_free(priv, &priv->txq[1]);
3780 ipw_queue_tx_free(priv, &priv->txq[2]);
3781 ipw_queue_tx_free(priv, &priv->txq[3]);
3784 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3786 /* First 3 bytes are manufacturer */
3787 bssid[0] = priv->mac_addr[0];
3788 bssid[1] = priv->mac_addr[1];
3789 bssid[2] = priv->mac_addr[2];
3791 /* Last bytes are random */
3792 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3794 bssid[0] &= 0xfe; /* clear multicast bit */
3795 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
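/*
 * Example: if the adapter MAC happened to begin with 00:16:6F, the ad-hoc
 * BSSID generated above would be 02:16:6F:xx:xx:xx -- clearing bit 0 of the
 * first octet keeps it unicast, setting bit 1 marks it as a locally
 * administered address, and the last three octets are random.
 */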
3798 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3800 struct ipw_station_entry entry;
3801 int i;
3802 DECLARE_MAC_BUF(mac);
3804 for (i = 0; i < priv->num_stations; i++) {
3805 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3806 /* Another node is active in network */
3807 priv->missed_adhoc_beacons = 0;
3808 if (!(priv->config & CFG_STATIC_CHANNEL))
3809 /* when other nodes drop out, we drop out */
3810 priv->config &= ~CFG_ADHOC_PERSIST;
3812 return i;
3816 if (i == MAX_STATIONS)
3817 return IPW_INVALID_STATION;
3819 IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid));
3821 entry.reserved = 0;
3822 entry.support_mode = 0;
3823 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3824 memcpy(priv->stations[i], bssid, ETH_ALEN);
3825 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3826 &entry, sizeof(entry));
3827 priv->num_stations++;
3829 return i;
3832 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3834 int i;
3836 for (i = 0; i < priv->num_stations; i++)
3837 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3838 return i;
3840 return IPW_INVALID_STATION;
3843 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3845 int err;
3846 DECLARE_MAC_BUF(mac);
3848 if (priv->status & STATUS_ASSOCIATING) {
3849 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3850 queue_work(priv->workqueue, &priv->disassociate);
3851 return;
3854 if (!(priv->status & STATUS_ASSOCIATED)) {
3855 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3856 return;
3859 IPW_DEBUG_ASSOC("Disassociation attempt from %s "
3860 "on channel %d.\n",
3861 print_mac(mac, priv->assoc_request.bssid),
3862 priv->assoc_request.channel);
3864 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3865 priv->status |= STATUS_DISASSOCIATING;
3867 if (quiet)
3868 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3869 else
3870 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3872 err = ipw_send_associate(priv, &priv->assoc_request);
3873 if (err) {
3874 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3875 "failed.\n");
3876 return;
3881 static int ipw_disassociate(void *data)
3883 struct ipw_priv *priv = data;
3884 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3885 return 0;
3886 ipw_send_disassociate(data, 0);
3887 return 1;
3890 static void ipw_bg_disassociate(struct work_struct *work)
3892 struct ipw_priv *priv =
3893 container_of(work, struct ipw_priv, disassociate);
3894 mutex_lock(&priv->mutex);
3895 ipw_disassociate(priv);
3896 mutex_unlock(&priv->mutex);
3899 static void ipw_system_config(struct work_struct *work)
3901 struct ipw_priv *priv =
3902 container_of(work, struct ipw_priv, system_config);
3904 #ifdef CONFIG_IPW2200_PROMISCUOUS
3905 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3906 priv->sys_config.accept_all_data_frames = 1;
3907 priv->sys_config.accept_non_directed_frames = 1;
3908 priv->sys_config.accept_all_mgmt_bcpr = 1;
3909 priv->sys_config.accept_all_mgmt_frames = 1;
3911 #endif
3913 ipw_send_system_config(priv);
3916 struct ipw_status_code {
3917 u16 status;
3918 const char *reason;
3921 static const struct ipw_status_code ipw_status_codes[] = {
3922 {0x00, "Successful"},
3923 {0x01, "Unspecified failure"},
3924 {0x0A, "Cannot support all requested capabilities in the "
3925 "Capability information field"},
3926 {0x0B, "Reassociation denied due to inability to confirm that "
3927 "association exists"},
3928 {0x0C, "Association denied due to reason outside the scope of this "
3929 "standard"},
3930 {0x0D,
3931 "Responding station does not support the specified authentication "
3932 "algorithm"},
3933 {0x0E,
3934 "Received an Authentication frame with authentication sequence "
3935 "transaction sequence number out of expected sequence"},
3936 {0x0F, "Authentication rejected because of challenge failure"},
3937 {0x10, "Authentication rejected due to timeout waiting for next "
3938 "frame in sequence"},
3939 {0x11, "Association denied because AP is unable to handle additional "
3940 "associated stations"},
3941 {0x12,
3942 "Association denied due to requesting station not supporting all "
3943 "of the datarates in the BSSBasicServiceSet Parameter"},
3944 {0x13,
3945 "Association denied due to requesting station not supporting "
3946 "short preamble operation"},
3947 {0x14,
3948 "Association denied due to requesting station not supporting "
3949 "PBCC encoding"},
3950 {0x15,
3951 "Association denied due to requesting station not supporting "
3952 "channel agility"},
3953 {0x19,
3954 "Association denied due to requesting station not supporting "
3955 "short slot operation"},
3956 {0x1A,
3957 "Association denied due to requesting station not supporting "
3958 "DSSS-OFDM operation"},
3959 {0x28, "Invalid Information Element"},
3960 {0x29, "Group Cipher is not valid"},
3961 {0x2A, "Pairwise Cipher is not valid"},
3962 {0x2B, "AKMP is not valid"},
3963 {0x2C, "Unsupported RSN IE version"},
3964 {0x2D, "Invalid RSN IE Capabilities"},
3965 {0x2E, "Cipher suite is rejected per security policy"},
3968 static const char *ipw_get_status_code(u16 status)
3970 int i;
3971 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3972 if (ipw_status_codes[i].status == (status & 0xff))
3973 return ipw_status_codes[i].reason;
3974 return "Unknown status value.";
3977 static void inline average_init(struct average *avg)
3979 memset(avg, 0, sizeof(*avg));
3982 #define DEPTH_RSSI 8
3983 #define DEPTH_NOISE 16
3984 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3986 return ((depth-1)*prev_avg + val)/depth;
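/*
 * Worked example: with depth == DEPTH_RSSI (8) each new sample moves the
 * average by roughly 1/8th of the difference, e.g. prev_avg = -60 and
 * val = -52 gives ((8 - 1) * -60 + -52) / 8 = -472 / 8 = -59, so one strong
 * reading only nudges the running RSSI by about 1 dB.
 */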
3989 static void average_add(struct average *avg, s16 val)
3991 avg->sum -= avg->entries[avg->pos];
3992 avg->sum += val;
3993 avg->entries[avg->pos++] = val;
3994 if (unlikely(avg->pos == AVG_ENTRIES)) {
3995 avg->init = 1;
3996 avg->pos = 0;
4000 static s16 average_value(struct average *avg)
4002 if (!unlikely(avg->init)) {
4003 if (avg->pos)
4004 return avg->sum / avg->pos;
4005 return 0;
4008 return avg->sum / AVG_ENTRIES;
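/*
 * Note on the window: average_add()/average_value() keep an AVG_ENTRIES-deep
 * ring; until the ring has wrapped once (avg->init still 0) the average is
 * taken over only the samples seen so far, afterwards always over the last
 * AVG_ENTRIES samples.
 */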
4011 static void ipw_reset_stats(struct ipw_priv *priv)
4013 u32 len = sizeof(u32);
4015 priv->quality = 0;
4017 average_init(&priv->average_missed_beacons);
4018 priv->exp_avg_rssi = -60;
4019 priv->exp_avg_noise = -85 + 0x100;
4021 priv->last_rate = 0;
4022 priv->last_missed_beacons = 0;
4023 priv->last_rx_packets = 0;
4024 priv->last_tx_packets = 0;
4025 priv->last_tx_failures = 0;
4027 /* Firmware managed, reset only when NIC is restarted, so we have to
4028 * normalize on the current value */
4029 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4030 &priv->last_rx_err, &len);
4031 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4032 &priv->last_tx_failures, &len);
4034 /* Driver managed, reset with each association */
4035 priv->missed_adhoc_beacons = 0;
4036 priv->missed_beacons = 0;
4037 priv->tx_packets = 0;
4038 priv->rx_packets = 0;
4042 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4044 u32 i = 0x80000000;
4045 u32 mask = priv->rates_mask;
4046 /* If currently associated in B mode, restrict the maximum
4047 * rate match to B rates */
4048 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4049 mask &= IEEE80211_CCK_RATES_MASK;
4051 /* TODO: Verify that the rate is supported by the current rates
4052 * list. */
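/* Walk down from the most significant bit to find the highest rate bit that
 * is still set in the mask. */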
4054 while (i && !(mask & i))
4055 i >>= 1;
4056 switch (i) {
4057 case IEEE80211_CCK_RATE_1MB_MASK:
4058 return 1000000;
4059 case IEEE80211_CCK_RATE_2MB_MASK:
4060 return 2000000;
4061 case IEEE80211_CCK_RATE_5MB_MASK:
4062 return 5500000;
4063 case IEEE80211_OFDM_RATE_6MB_MASK:
4064 return 6000000;
4065 case IEEE80211_OFDM_RATE_9MB_MASK:
4066 return 9000000;
4067 case IEEE80211_CCK_RATE_11MB_MASK:
4068 return 11000000;
4069 case IEEE80211_OFDM_RATE_12MB_MASK:
4070 return 12000000;
4071 case IEEE80211_OFDM_RATE_18MB_MASK:
4072 return 18000000;
4073 case IEEE80211_OFDM_RATE_24MB_MASK:
4074 return 24000000;
4075 case IEEE80211_OFDM_RATE_36MB_MASK:
4076 return 36000000;
4077 case IEEE80211_OFDM_RATE_48MB_MASK:
4078 return 48000000;
4079 case IEEE80211_OFDM_RATE_54MB_MASK:
4080 return 54000000;
4083 if (priv->ieee->mode == IEEE_B)
4084 return 11000000;
4085 else
4086 return 54000000;
4089 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4091 u32 rate, len = sizeof(rate);
4092 int err;
4094 if (!(priv->status & STATUS_ASSOCIATED))
4095 return 0;
4097 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4098 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4099 &len);
4100 if (err) {
4101 IPW_DEBUG_INFO("failed querying ordinals.\n");
4102 return 0;
4104 } else
4105 return ipw_get_max_rate(priv);
4107 switch (rate) {
4108 case IPW_TX_RATE_1MB:
4109 return 1000000;
4110 case IPW_TX_RATE_2MB:
4111 return 2000000;
4112 case IPW_TX_RATE_5MB:
4113 return 5500000;
4114 case IPW_TX_RATE_6MB:
4115 return 6000000;
4116 case IPW_TX_RATE_9MB:
4117 return 9000000;
4118 case IPW_TX_RATE_11MB:
4119 return 11000000;
4120 case IPW_TX_RATE_12MB:
4121 return 12000000;
4122 case IPW_TX_RATE_18MB:
4123 return 18000000;
4124 case IPW_TX_RATE_24MB:
4125 return 24000000;
4126 case IPW_TX_RATE_36MB:
4127 return 36000000;
4128 case IPW_TX_RATE_48MB:
4129 return 48000000;
4130 case IPW_TX_RATE_54MB:
4131 return 54000000;
4134 return 0;
4137 #define IPW_STATS_INTERVAL (2 * HZ)
4138 static void ipw_gather_stats(struct ipw_priv *priv)
4140 u32 rx_err, rx_err_delta, rx_packets_delta;
4141 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4142 u32 missed_beacons_percent, missed_beacons_delta;
4143 u32 quality = 0;
4144 u32 len = sizeof(u32);
4145 s16 rssi;
4146 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4147 rate_quality;
4148 u32 max_rate;
4150 if (!(priv->status & STATUS_ASSOCIATED)) {
4151 priv->quality = 0;
4152 return;
4155 /* Update the statistics */
4156 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4157 &priv->missed_beacons, &len);
4158 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4159 priv->last_missed_beacons = priv->missed_beacons;
4160 if (priv->assoc_request.beacon_interval) {
4161 missed_beacons_percent = missed_beacons_delta *
4162 (HZ * priv->assoc_request.beacon_interval) /
4163 (IPW_STATS_INTERVAL * 10);
4164 } else {
4165 missed_beacons_percent = 0;
4167 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4169 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4170 rx_err_delta = rx_err - priv->last_rx_err;
4171 priv->last_rx_err = rx_err;
4173 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4174 tx_failures_delta = tx_failures - priv->last_tx_failures;
4175 priv->last_tx_failures = tx_failures;
4177 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4178 priv->last_rx_packets = priv->rx_packets;
4180 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4181 priv->last_tx_packets = priv->tx_packets;
4183 /* Calculate quality based on the following:
4185 * Missed beacon: 100% = 0, 0% = 70% missed
4186 * Rate: 60% = 1Mbs, 100% = Max
4187 * Rx and Tx errors represent a straight % of total Rx/Tx
4188 * RSSI: 100% = > -50, 0% = < -80
4189 * Rx errors: 100% = 0, 0% = 50% missed
4191 * The lowest computed quality is used.
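/* For example, 10% missed beacons gives beacon_quality = 90, which the code
 * below rescales across the 5% threshold to (90 - 5) * 100 / 95 = 89%. */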
4194 #define BEACON_THRESHOLD 5
4195 beacon_quality = 100 - missed_beacons_percent;
4196 if (beacon_quality < BEACON_THRESHOLD)
4197 beacon_quality = 0;
4198 else
4199 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4200 (100 - BEACON_THRESHOLD);
4201 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4202 beacon_quality, missed_beacons_percent);
4204 priv->last_rate = ipw_get_current_rate(priv);
4205 max_rate = ipw_get_max_rate(priv);
4206 rate_quality = priv->last_rate * 40 / max_rate + 60;
4207 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4208 rate_quality, priv->last_rate / 1000000);
4210 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4211 rx_quality = 100 - (rx_err_delta * 100) /
4212 (rx_packets_delta + rx_err_delta);
4213 else
4214 rx_quality = 100;
4215 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4216 rx_quality, rx_err_delta, rx_packets_delta);
4218 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4219 tx_quality = 100 - (tx_failures_delta * 100) /
4220 (tx_packets_delta + tx_failures_delta);
4221 else
4222 tx_quality = 100;
4223 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4224 tx_quality, tx_failures_delta, tx_packets_delta);
4226 rssi = priv->exp_avg_rssi;
4227 signal_quality =
4228 (100 *
4229 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4230 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4231 (priv->ieee->perfect_rssi - rssi) *
4232 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4233 62 * (priv->ieee->perfect_rssi - rssi))) /
4234 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4235 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4236 if (signal_quality > 100)
4237 signal_quality = 100;
4238 else if (signal_quality < 1)
4239 signal_quality = 0;
4241 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4242 signal_quality, rssi);
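/* Worked example, assuming the perfect_rssi/worst_rssi of -20/-85 dBm that
 * this driver configures at init (treat the numbers as illustrative): an
 * exponentially averaged RSSI of -60 dBm gives
 * (100*65*65 - 40*(15*65 + 62*40)) / (65*65) = 284300 / 4225, about 67%. */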
4244 quality = min(beacon_quality,
4245 min(rate_quality,
4246 min(tx_quality, min(rx_quality, signal_quality))));
4247 if (quality == beacon_quality)
4248 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4249 quality);
4250 if (quality == rate_quality)
4251 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4252 quality);
4253 if (quality == tx_quality)
4254 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4255 quality);
4256 if (quality == rx_quality)
4257 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4258 quality);
4259 if (quality == signal_quality)
4260 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4261 quality);
4263 priv->quality = quality;
4265 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4266 IPW_STATS_INTERVAL);
4269 static void ipw_bg_gather_stats(struct work_struct *work)
4271 struct ipw_priv *priv =
4272 container_of(work, struct ipw_priv, gather_stats.work);
4273 mutex_lock(&priv->mutex);
4274 ipw_gather_stats(priv);
4275 mutex_unlock(&priv->mutex);
4278 /* Missed beacon behavior:
4279 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4280 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4281 * Above disassociate threshold, give up and stop scanning.
4282 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
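/* For example, with roaming_threshold = 8 and disassociate_threshold = 24
 * (illustrative numbers; the real values come from the module parameters and
 * the defaults in ipw2200.h), misses 1-8 are merely logged, misses 9-24 set
 * STATUS_ROAMING and kick off scans for a better signal, and anything above
 * 24 disassociates. */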
4283 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4284 int missed_count)
4286 priv->notif_missed_beacons = missed_count;
4288 if (missed_count > priv->disassociate_threshold &&
4289 priv->status & STATUS_ASSOCIATED) {
4290 /* If associated and we've hit the missed
4291 * beacon threshold, disassociate, turn
4292 * off roaming, and abort any active scans */
4293 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4294 IPW_DL_STATE | IPW_DL_ASSOC,
4295 "Missed beacon: %d - disassociate\n", missed_count);
4296 priv->status &= ~STATUS_ROAMING;
4297 if (priv->status & STATUS_SCANNING) {
4298 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4299 IPW_DL_STATE,
4300 "Aborting scan with missed beacon.\n");
4301 queue_work(priv->workqueue, &priv->abort_scan);
4304 queue_work(priv->workqueue, &priv->disassociate);
4305 return;
4308 if (priv->status & STATUS_ROAMING) {
4309 /* If we are currently roaming, then just
4310 * print a debug statement... */
4311 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4312 "Missed beacon: %d - roam in progress\n",
4313 missed_count);
4314 return;
4317 if (roaming &&
4318 (missed_count > priv->roaming_threshold &&
4319 missed_count <= priv->disassociate_threshold)) {
4320 /* If we are not already roaming, set the ROAM
4321 * bit in the status and kick off a scan.
4322 * This can happen several times before we reach
4323 * disassociate_threshold. */
4324 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4325 "Missed beacon: %d - initiate "
4326 "roaming\n", missed_count);
4327 if (!(priv->status & STATUS_ROAMING)) {
4328 priv->status |= STATUS_ROAMING;
4329 if (!(priv->status & STATUS_SCANNING))
4330 queue_delayed_work(priv->workqueue,
4331 &priv->request_scan, 0);
4333 return;
4336 if (priv->status & STATUS_SCANNING) {
4337 /* Stop scan to keep fw from getting
4338 * stuck (only if we aren't roaming --
4339 * otherwise we'll never scan more than 2 or 3
4340 * channels..) */
4341 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4342 "Aborting scan with missed beacon.\n");
4343 queue_work(priv->workqueue, &priv->abort_scan);
4346 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4349 static void ipw_scan_event(struct work_struct *work)
4351 union iwreq_data wrqu;
4353 struct ipw_priv *priv =
4354 container_of(work, struct ipw_priv, scan_event.work);
4356 wrqu.data.length = 0;
4357 wrqu.data.flags = 0;
4358 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
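/* Decide how to deliver the SIOCGIWSCAN "scan results available" event.
 * Completions of scans that were not requested from user space (background
 * and roaming scans) are batched: a single empty event is deferred by roughly
 * four seconds via the scan_event delayed work, so a burst of firmware scans
 * wakes user space only once.  A user-requested scan cancels any pending
 * deferred event and reports completion immediately. */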
4361 static void handle_scan_event(struct ipw_priv *priv)
4363 /* Only userspace-requested scan completion events go out immediately */
4364 if (!priv->user_requested_scan) {
4365 if (!delayed_work_pending(&priv->scan_event))
4366 queue_delayed_work(priv->workqueue, &priv->scan_event,
4367 round_jiffies(msecs_to_jiffies(4000)));
4368 } else {
4369 union iwreq_data wrqu;
4371 priv->user_requested_scan = 0;
4372 cancel_delayed_work(&priv->scan_event);
4374 wrqu.data.length = 0;
4375 wrqu.data.flags = 0;
4376 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4381 * Handle host notification packet.
4382 * Called from interrupt routine
4384 static void ipw_rx_notification(struct ipw_priv *priv,
4385 struct ipw_rx_notification *notif)
4387 DECLARE_MAC_BUF(mac);
4388 notif->size = le16_to_cpu(notif->size);
4390 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4392 switch (notif->subtype) {
4393 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4394 struct notif_association *assoc = &notif->u.assoc;
4396 switch (assoc->state) {
4397 case CMAS_ASSOCIATED:{
4398 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4399 IPW_DL_ASSOC,
4400 "associated: '%s' %s"
4401 " \n",
4402 escape_essid(priv->essid,
4403 priv->essid_len),
4404 print_mac(mac, priv->bssid));
4406 switch (priv->ieee->iw_mode) {
4407 case IW_MODE_INFRA:
4408 memcpy(priv->ieee->bssid,
4409 priv->bssid, ETH_ALEN);
4410 break;
4412 case IW_MODE_ADHOC:
4413 memcpy(priv->ieee->bssid,
4414 priv->bssid, ETH_ALEN);
4416 /* clear out the station table */
4417 priv->num_stations = 0;
4419 IPW_DEBUG_ASSOC("queueing adhoc check\n");
4421 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4425 priv->assoc_request.beacon_interval);
4428 break;
4431 priv->status &= ~STATUS_ASSOCIATING;
4432 priv->status |= STATUS_ASSOCIATED;
4433 queue_work(priv->workqueue,
4434 &priv->system_config);
4436 #ifdef CONFIG_IPW2200_QOS
4437 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4438 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4439 if ((priv->status & STATUS_AUTH) &&
4440 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4441 == IEEE80211_STYPE_ASSOC_RESP)) {
4442 if ((sizeof(struct ieee80211_assoc_response) <= notif->size)
4446     && (notif->size <= 2314)) {
4447 struct ieee80211_rx_stats stats = { .len = notif->size - 1 };
4455 IPW_DEBUG_QOS("QoS Associate size %d\n", notif->size);
4459 ieee80211_rx_mgt(priv->ieee,
4461 (struct ieee80211_hdr_4addr *)&notif->u.raw, &stats);
4467 #endif
4469 schedule_work(&priv->link_up);
4471 break;
4474 case CMAS_AUTHENTICATED:{
4475 if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) {
4478 struct notif_authenticate *auth = &notif->u.auth;
4480 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
4483 "deauthenticated: '%s' %s: (0x%04X) - %s \n",
4486 escape_essid(priv->essid, priv->essid_len),
4490 print_mac(mac, priv->bssid),
4491 ntohs(auth->status),
4492 ipw_get_status_code(ntohs(auth->status)));
4496 priv->status &=
4497 ~(STATUS_ASSOCIATING |
4498 STATUS_AUTH |
4499 STATUS_ASSOCIATED);
4501 schedule_work(&priv->link_down);
4502 break;
4505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4506 IPW_DL_ASSOC,
4507 "authenticated: '%s' %s"
4508 "\n",
4509 escape_essid(priv->essid,
4510 priv->essid_len),
4511 print_mac(mac, priv->bssid));
4512 break;
4515 case CMAS_INIT:{
4516 if (priv->status & STATUS_AUTH) {
4517 struct ieee80211_assoc_response *resp;
4520 resp = (struct ieee80211_assoc_response *)&notif->u.raw;
4524 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
4527 "association failed (0x%04X): %s\n",
4528 ntohs(resp->status),
4529 ipw_get_status_code(ntohs(resp->status)));
4534 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4535 IPW_DL_ASSOC,
4536 "disassociated: '%s' %s"
4537 " \n",
4538 escape_essid(priv->essid,
4539 priv->essid_len),
4540 print_mac(mac, priv->bssid));
4542 priv->status &=
4543 ~(STATUS_DISASSOCIATING |
4544 STATUS_ASSOCIATING |
4545 STATUS_ASSOCIATED | STATUS_AUTH);
4546 if (priv->assoc_network &&
4547     (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
4550 ipw_remove_current_network(priv);
4553 schedule_work(&priv->link_down);
4555 break;
4558 case CMAS_RX_ASSOC_RESP:
4559 break;
4561 default:
4562 IPW_ERROR("assoc: unknown (%d)\n",
4563 assoc->state);
4564 break;
4567 break;
4570 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4571 struct notif_authenticate *auth = &notif->u.auth;
4572 switch (auth->state) {
4573 case CMAS_AUTHENTICATED:
4574 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4575 "authenticated: '%s' %s \n",
4576 escape_essid(priv->essid,
4577 priv->essid_len),
4578 print_mac(mac, priv->bssid));
4579 priv->status |= STATUS_AUTH;
4580 break;
4582 case CMAS_INIT:
4583 if (priv->status & STATUS_AUTH) {
4584 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4585 IPW_DL_ASSOC,
4586 "authentication failed (0x%04X): %s\n",
4587 ntohs(auth->status),
4588 ipw_get_status_code(ntohs(auth->status)));
4592 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4593 IPW_DL_ASSOC,
4594 "deauthenticated: '%s' %s\n",
4595 escape_essid(priv->essid,
4596 priv->essid_len),
4597 print_mac(mac, priv->bssid));
4599 priv->status &= ~(STATUS_ASSOCIATING |
4600 STATUS_AUTH |
4601 STATUS_ASSOCIATED);
4603 schedule_work(&priv->link_down);
4604 break;
4606 case CMAS_TX_AUTH_SEQ_1:
4607 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4608 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4609 break;
4610 case CMAS_RX_AUTH_SEQ_2:
4611 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4612 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4613 break;
4614 case CMAS_AUTH_SEQ_1_PASS:
4615 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4616 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4617 break;
4618 case CMAS_AUTH_SEQ_1_FAIL:
4619 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4620 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4621 break;
4622 case CMAS_TX_AUTH_SEQ_3:
4623 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4624 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4625 break;
4626 case CMAS_RX_AUTH_SEQ_4:
4627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4628 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4629 break;
4630 case CMAS_AUTH_SEQ_2_PASS:
4631 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4632 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4633 break;
4634 case CMAS_AUTH_SEQ_2_FAIL:
4635 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4636 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4637 break;
4638 case CMAS_TX_ASSOC:
4639 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4640 IPW_DL_ASSOC, "TX_ASSOC\n");
4641 break;
4642 case CMAS_RX_ASSOC_RESP:
4643 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4644 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4646 break;
4647 case CMAS_ASSOCIATED:
4648 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4649 IPW_DL_ASSOC, "ASSOCIATED\n");
4650 break;
4651 default:
4652 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4653 auth->state);
4654 break;
4656 break;
4659 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4660 struct notif_channel_result *x =
4661 &notif->u.channel_result;
4663 if (notif->size == sizeof(*x)) {
4664 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4665 x->channel_num);
4666 } else {
4667 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4668 "(should be %zd)\n",
4669 notif->size, sizeof(*x));
4671 break;
4674 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4675 struct notif_scan_complete *x = &notif->u.scan_complete;
4676 if (notif->size == sizeof(*x)) {
4677 IPW_DEBUG_SCAN
4678 ("Scan completed: type %d, %d channels, "
4679 "%d status\n", x->scan_type,
4680 x->num_channels, x->status);
4681 } else {
4682 IPW_ERROR("Scan completed of wrong size %d "
4683 "(should be %zd)\n",
4684 notif->size, sizeof(*x));
4687 priv->status &=
4688 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4690 wake_up_interruptible(&priv->wait_state);
4691 cancel_delayed_work(&priv->scan_check);
4693 if (priv->status & STATUS_EXIT_PENDING)
4694 break;
4696 priv->ieee->scans++;
4698 #ifdef CONFIG_IPW2200_MONITOR
4699 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4700 priv->status |= STATUS_SCAN_FORCED;
4701 queue_delayed_work(priv->workqueue,
4702 &priv->request_scan, 0);
4703 break;
4705 priv->status &= ~STATUS_SCAN_FORCED;
4706 #endif /* CONFIG_IPW2200_MONITOR */
4708 if (!(priv->status & (STATUS_ASSOCIATED |
4709 STATUS_ASSOCIATING |
4710 STATUS_ROAMING |
4711 STATUS_DISASSOCIATING)))
4712 queue_work(priv->workqueue, &priv->associate);
4713 else if (priv->status & STATUS_ROAMING) {
4714 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4715 /* If a scan completed and we are in roam mode, then
4716 * the scan that completed was the one requested as a
4717 * result of entering roam... so, schedule the
4718 * roam work */
4719 queue_work(priv->workqueue,
4720 &priv->roam);
4721 else
4722 /* Don't schedule if we aborted the scan */
4723 priv->status &= ~STATUS_ROAMING;
4724 } else if (priv->status & STATUS_SCAN_PENDING)
4725 queue_delayed_work(priv->workqueue,
4726 &priv->request_scan, 0);
4727 else if (priv->config & CFG_BACKGROUND_SCAN
4728 && priv->status & STATUS_ASSOCIATED)
4729 queue_delayed_work(priv->workqueue,
4730 &priv->request_scan,
4731 round_jiffies(HZ));
4733 /* Send an empty event to user space.
4734 * We don't send the received data on the event because
4735 * it would require us to do complex transcoding, and
4736 * we want to minimise the work done in the irq handler.
4737 * Use a request to extract the data.
4738 * Also, we generate this event for any scan, regardless
4739 * of how the scan was initiated. User space can just
4740 * sync on periodic scan to get fresh data...
4741 * Jean II */
4742 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4743 handle_scan_event(priv);
4744 break;
4747 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4748 struct notif_frag_length *x = &notif->u.frag_len;
4750 if (notif->size == sizeof(*x))
4751 IPW_ERROR("Frag length: %d\n",
4752 le16_to_cpu(x->frag_length));
4753 else
4754 IPW_ERROR("Frag length of wrong size %d "
4755 "(should be %zd)\n",
4756 notif->size, sizeof(*x));
4757 break;
4760 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4761 struct notif_link_deterioration *x =
4762 &notif->u.link_deterioration;
4764 if (notif->size == sizeof(*x)) {
4765 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4766 "link deterioration: type %d, cnt %d\n",
4767 x->silence_notification_type,
4768 x->silence_count);
4769 memcpy(&priv->last_link_deterioration, x,
4770 sizeof(*x));
4771 } else {
4772 IPW_ERROR("Link Deterioration of wrong size %d "
4773 "(should be %zd)\n",
4774 notif->size, sizeof(*x));
4776 break;
4779 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4780 IPW_ERROR("Dino config\n");
4781 if (priv->hcmd
4782 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4783 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4785 break;
4788 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4789 struct notif_beacon_state *x = &notif->u.beacon_state;
4790 if (notif->size != sizeof(*x)) {
4791 IPW_ERROR
4792 ("Beacon state of wrong size %d (should "
4793 "be %zd)\n", notif->size, sizeof(*x));
4794 break;
4797 if (le32_to_cpu(x->state) ==
4798 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4799 ipw_handle_missed_beacon(priv,
4800 le32_to_cpu(x->number));
4803 break;
4806 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4807 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4808 if (notif->size == sizeof(*x)) {
4809 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4810 "0x%02x station %d\n",
4811 x->key_state, x->security_type,
4812 x->station_index);
4813 break;
4816 IPW_ERROR
4817 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4818 notif->size, sizeof(*x));
4819 break;
4822 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4823 struct notif_calibration *x = &notif->u.calibration;
4825 if (notif->size == sizeof(*x)) {
4826 memcpy(&priv->calib, x, sizeof(*x));
4827 IPW_DEBUG_INFO("TODO: Calibration\n");
4828 break;
4831 IPW_ERROR
4832 ("Calibration of wrong size %d (should be %zd)\n",
4833 notif->size, sizeof(*x));
4834 break;
4837 case HOST_NOTIFICATION_NOISE_STATS:{
4838 if (notif->size == sizeof(u32)) {
4839 priv->exp_avg_noise =
4840 exponential_average(priv->exp_avg_noise,
4841 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4842 DEPTH_NOISE);
4843 break;
4846 IPW_ERROR
4847 ("Noise stat is wrong size %d (should be %zd)\n",
4848 notif->size, sizeof(u32));
4849 break;
4852 default:
4853 IPW_DEBUG_NOTIF("Unknown notification: "
4854 "subtype=%d,flags=0x%2x,size=%d\n",
4855 notif->subtype, notif->flags, notif->size);
4860 * Destroys all DMA structures and initialises them again.
4862 * @param priv
4863 * @return error code
4865 static int ipw_queue_reset(struct ipw_priv *priv)
4867 int rc = 0;
4868 /** @todo customize queue sizes */
4869 int nTx = 64, nTxCmd = 8;
4870 ipw_tx_queue_free(priv);
4871 /* Tx CMD queue */
4872 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4873 IPW_TX_CMD_QUEUE_READ_INDEX,
4874 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4875 IPW_TX_CMD_QUEUE_BD_BASE,
4876 IPW_TX_CMD_QUEUE_BD_SIZE);
4877 if (rc) {
4878 IPW_ERROR("Tx Cmd queue init failed\n");
4879 goto error;
4881 /* Tx queue(s) */
4882 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4883 IPW_TX_QUEUE_0_READ_INDEX,
4884 IPW_TX_QUEUE_0_WRITE_INDEX,
4885 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4886 if (rc) {
4887 IPW_ERROR("Tx 0 queue init failed\n");
4888 goto error;
4890 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4891 IPW_TX_QUEUE_1_READ_INDEX,
4892 IPW_TX_QUEUE_1_WRITE_INDEX,
4893 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4894 if (rc) {
4895 IPW_ERROR("Tx 1 queue init failed\n");
4896 goto error;
4898 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4899 IPW_TX_QUEUE_2_READ_INDEX,
4900 IPW_TX_QUEUE_2_WRITE_INDEX,
4901 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4902 if (rc) {
4903 IPW_ERROR("Tx 2 queue init failed\n");
4904 goto error;
4906 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4907 IPW_TX_QUEUE_3_READ_INDEX,
4908 IPW_TX_QUEUE_3_WRITE_INDEX,
4909 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4910 if (rc) {
4911 IPW_ERROR("Tx 3 queue init failed\n");
4912 goto error;
4914 /* statistics */
4915 priv->rx_bufs_min = 0;
4916 priv->rx_pend_max = 0;
4917 return rc;
4919 error:
4920 ipw_tx_queue_free(priv);
4921 return rc;
4925 * Reclaim Tx queue entries no longer used by the NIC.
4927 * When the FW advances the 'R' index, all entries between the old and
4928 * new 'R' index need to be reclaimed. As a result, some free space
4929 * forms. If there is enough free space (> low mark), wake Tx queue.
4931 * @note Need to protect against garbage in 'R' index
4932 * @param priv
4933 * @param txq
4934 * @param qindex
4935 * @return Number of used entries remaining in the queue
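/* e.g. with n_bd = 64, first_empty = 5 and last_used = 60 the raw difference
 * is -55, so n_bd is added back, leaving 9 entries still in use. */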
4937 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4938 struct clx2_tx_queue *txq, int qindex)
4940 u32 hw_tail;
4941 int used;
4942 struct clx2_queue *q = &txq->q;
4944 hw_tail = ipw_read32(priv, q->reg_r);
4945 if (hw_tail >= q->n_bd) {
4946 IPW_ERROR
4947 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4948 hw_tail, q->n_bd);
4949 goto done;
4951 for (; q->last_used != hw_tail;
4952 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4953 ipw_queue_tx_free_tfd(priv, txq);
4954 priv->tx_packets++;
4956 done:
4957 if ((ipw_queue_space(q) > q->low_mark) &&
4958 (qindex >= 0) &&
4959 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4960 netif_wake_queue(priv->net_dev);
4961 used = q->first_empty - q->last_used;
4962 if (used < 0)
4963 used += q->n_bd;
4965 return used;
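/* Push a host command onto the Tx command queue: claim the next TFD, flag it
 * as a host-command frame that interrupts on completion, copy the payload in
 * and advance the firmware write index.  The trailing read of register 0x90
 * is presumably there to flush the posted PCI write. */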
4968 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4969 int len, int sync)
4971 struct clx2_tx_queue *txq = &priv->txq_cmd;
4972 struct clx2_queue *q = &txq->q;
4973 struct tfd_frame *tfd;
4975 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4976 IPW_ERROR("No space for Tx\n");
4977 return -EBUSY;
4980 tfd = &txq->bd[q->first_empty];
4981 txq->txb[q->first_empty] = NULL;
4983 memset(tfd, 0, sizeof(*tfd));
4984 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4985 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4986 priv->hcmd_seq++;
4987 tfd->u.cmd.index = hcmd;
4988 tfd->u.cmd.length = len;
4989 memcpy(tfd->u.cmd.payload, buf, len);
4990 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4991 ipw_write32(priv, q->reg_w, q->first_empty);
4992 _ipw_read32(priv, 0x90);
4994 return 0;
4998 * Rx theory of operation
5000 * The host allocates 32 DMA target addresses and passes the host address
5001 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5002 * 0 to 31
5004 * Rx Queue Indexes
5005 * The host/firmware share two index registers for managing the Rx buffers.
5007 * The READ index maps to the first position that the firmware may be writing
5008 * to -- the driver can read up to (but not including) this position and get
5009 * good data.
5010 * The READ index is managed by the firmware once the card is enabled.
5012 * The WRITE index maps to the last position the driver has read from -- the
5013 * position preceding WRITE is the last slot the firmware can place a packet.
5015 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5016 * WRITE = READ.
5018 * During initialization the host sets up the READ queue position to the first
5019 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5021 * When the firmware places a packet in a buffer it will advance the READ index
5022 * and fire the RX interrupt. The driver can then query the READ index and
5023 * process as many packets as possible, moving the WRITE index forward as it
5024 * resets the Rx queue buffers with new memory.
5026 * The management in the driver is as follows:
5027 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5028 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5029 * to replenish the ipw->rxq->rx_free.
5030 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5031 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5032 * 'processed' and 'read' driver indexes as well)
5033 * + A received packet is processed and handed to the kernel network stack,
5034 * detached from the ipw->rxq. The driver 'processed' index is updated.
5035 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5036 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5037 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5038 * were enough free buffers and RX_STALLED is set it is cleared.
5041 * Driver sequence:
5043 * ipw_rx_queue_alloc() Allocates rx_free
5044 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5045 * ipw_rx_queue_restock
5046 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5047 * queue, updates firmware pointers, and updates
5048 * the WRITE index. If insufficient rx_free buffers
5049 * are available, schedules ipw_rx_queue_replenish
5051 * -- enable interrupts --
5052 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5053 * READ INDEX, detaching the SKB from the pool.
5054 * Moves the packet buffer from queue to rx_used.
5055 * Calls ipw_rx_queue_restock to refill any empty
5056 * slots.
5057 * ...
5062 * If there are slots in the RX queue that need to be restocked,
5063 * and we have free pre-allocated buffers, fill the ranks as much
5064 * as we can pulling from rx_free.
5066 * This moves the 'write' index forward to catch up with 'processed', and
5067 * also updates the memory address in the firmware to reference the new
5068 * target buffer.
5070 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5072 struct ipw_rx_queue *rxq = priv->rxq;
5073 struct list_head *element;
5074 struct ipw_rx_mem_buffer *rxb;
5075 unsigned long flags;
5076 int write;
5078 spin_lock_irqsave(&rxq->lock, flags);
5079 write = rxq->write;
5080 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5081 element = rxq->rx_free.next;
5082 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5083 list_del(element);
5085 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5086 rxb->dma_addr);
5087 rxq->queue[rxq->write] = rxb;
5088 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5089 rxq->free_count--;
5091 spin_unlock_irqrestore(&rxq->lock, flags);
5093 /* If the pre-allocated buffer pool is dropping low, schedule to
5094 * refill it */
5095 if (rxq->free_count <= RX_LOW_WATERMARK)
5096 queue_work(priv->workqueue, &priv->rx_replenish);
5098 /* If we've added more space for the firmware to place data, tell it */
5099 if (write != rxq->write)
5100 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5104 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5105 * Also restock the Rx queue via ipw_rx_queue_restock.
5107 * This is called as a scheduled work item (except during initialization)
5109 static void ipw_rx_queue_replenish(void *data)
5111 struct ipw_priv *priv = data;
5112 struct ipw_rx_queue *rxq = priv->rxq;
5113 struct list_head *element;
5114 struct ipw_rx_mem_buffer *rxb;
5115 unsigned long flags;
5117 spin_lock_irqsave(&rxq->lock, flags);
5118 while (!list_empty(&rxq->rx_used)) {
5119 element = rxq->rx_used.next;
5120 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5121 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5122 if (!rxb->skb) {
5123 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5124 priv->net_dev->name);
5125 /* We don't reschedule replenish work here -- we will
5126 * call the restock method and if it still needs
5127 * more buffers it will schedule replenish */
5128 break;
5130 list_del(element);
5132 rxb->dma_addr =
5133 pci_map_single(priv->pci_dev, rxb->skb->data,
5134 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5136 list_add_tail(&rxb->list, &rxq->rx_free);
5137 rxq->free_count++;
5139 spin_unlock_irqrestore(&rxq->lock, flags);
5141 ipw_rx_queue_restock(priv);
5144 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5146 struct ipw_priv *priv =
5147 container_of(work, struct ipw_priv, rx_replenish);
5148 mutex_lock(&priv->mutex);
5149 ipw_rx_queue_replenish(priv);
5150 mutex_unlock(&priv->mutex);
5153 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5154 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5155 * This free routine walks the list of POOL entries and, if the SKB is
5156 * non-NULL, unmaps and frees it.
5158 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5160 int i;
5162 if (!rxq)
5163 return;
5165 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5166 if (rxq->pool[i].skb != NULL) {
5167 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5168 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5169 dev_kfree_skb(rxq->pool[i].skb);
5173 kfree(rxq);
5176 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5178 struct ipw_rx_queue *rxq;
5179 int i;
5181 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5182 if (unlikely(!rxq)) {
5183 IPW_ERROR("memory allocation failed\n");
5184 return NULL;
5186 spin_lock_init(&rxq->lock);
5187 INIT_LIST_HEAD(&rxq->rx_free);
5188 INIT_LIST_HEAD(&rxq->rx_used);
5190 /* Fill the rx_used queue with _all_ of the Rx buffers */
5191 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5192 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5194 /* Set us so that we have processed and used all buffers, but have
5195 * not restocked the Rx queue with fresh buffers */
5196 rxq->read = rxq->write = 0;
5197 rxq->processed = RX_QUEUE_SIZE - 1;
5198 rxq->free_count = 0;
5200 return rxq;
5203 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5205 rate &= ~IEEE80211_BASIC_RATE_MASK;
5206 if (ieee_mode == IEEE_A) {
5207 switch (rate) {
5208 case IEEE80211_OFDM_RATE_6MB:
5209 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5210 1 : 0;
5211 case IEEE80211_OFDM_RATE_9MB:
5212 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5213 1 : 0;
5214 case IEEE80211_OFDM_RATE_12MB:
5215 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5217 case IEEE80211_OFDM_RATE_18MB:
5218 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5220 case IEEE80211_OFDM_RATE_24MB:
5221 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5223 case IEEE80211_OFDM_RATE_36MB:
5224 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5226 case IEEE80211_OFDM_RATE_48MB:
5227 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5229 case IEEE80211_OFDM_RATE_54MB:
5230 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5232 default:
5233 return 0;
5237 /* B and G mixed */
5238 switch (rate) {
5239 case IEEE80211_CCK_RATE_1MB:
5240 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5241 case IEEE80211_CCK_RATE_2MB:
5242 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5243 case IEEE80211_CCK_RATE_5MB:
5244 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5245 case IEEE80211_CCK_RATE_11MB:
5246 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5249 /* If we are limited to B modulations, bail at this point */
5250 if (ieee_mode == IEEE_B)
5251 return 0;
5253 /* G */
5254 switch (rate) {
5255 case IEEE80211_OFDM_RATE_6MB:
5256 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5257 case IEEE80211_OFDM_RATE_9MB:
5258 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5259 case IEEE80211_OFDM_RATE_12MB:
5260 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5261 case IEEE80211_OFDM_RATE_18MB:
5262 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5263 case IEEE80211_OFDM_RATE_24MB:
5264 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5265 case IEEE80211_OFDM_RATE_36MB:
5266 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5267 case IEEE80211_OFDM_RATE_48MB:
5268 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5269 case IEEE80211_OFDM_RATE_54MB:
5270 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5273 return 0;
5276 static int ipw_compatible_rates(struct ipw_priv *priv,
5277 const struct ieee80211_network *network,
5278 struct ipw_supported_rates *rates)
5280 int num_rates, i;
5282 memset(rates, 0, sizeof(*rates));
5283 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5284 rates->num_rates = 0;
5285 for (i = 0; i < num_rates; i++) {
5286 if (!ipw_is_rate_in_mask(priv, network->mode,
5287 network->rates[i])) {
5289 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5290 IPW_DEBUG_SCAN("Adding masked mandatory "
5291 "rate %02X\n",
5292 network->rates[i]);
5293 rates->supported_rates[rates->num_rates++] =
5294 network->rates[i];
5295 continue;
5298 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5299 network->rates[i], priv->rates_mask);
5300 continue;
5303 rates->supported_rates[rates->num_rates++] = network->rates[i];
5306 num_rates = min(network->rates_ex_len,
5307 (u8) (IPW_MAX_RATES - num_rates));
5308 for (i = 0; i < num_rates; i++) {
5309 if (!ipw_is_rate_in_mask(priv, network->mode,
5310 network->rates_ex[i])) {
5311 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5312 IPW_DEBUG_SCAN("Adding masked mandatory "
5313 "rate %02X\n",
5314 network->rates_ex[i]);
5315 rates->supported_rates[rates->num_rates++] =
5316 network->rates_ex[i];
5317 continue;
5320 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5321 network->rates_ex[i], priv->rates_mask);
5322 continue;
5325 rates->supported_rates[rates->num_rates++] =
5326 network->rates_ex[i];
5329 return 1;
5332 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5333 const struct ipw_supported_rates *src)
5335 u8 i;
5336 for (i = 0; i < src->num_rates; i++)
5337 dest->supported_rates[i] = src->supported_rates[i];
5338 dest->num_rates = src->num_rates;
5341 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5342 * mask should ever be used -- right now all callers to add the scan rates are
5343 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5344 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5345 u8 modulation, u32 rate_mask)
5347 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5348 IEEE80211_BASIC_RATE_MASK : 0;
5350 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5351 rates->supported_rates[rates->num_rates++] =
5352 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5354 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5355 rates->supported_rates[rates->num_rates++] =
5356 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5358 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5359 rates->supported_rates[rates->num_rates++] = basic_mask |
5360 IEEE80211_CCK_RATE_5MB;
5362 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5363 rates->supported_rates[rates->num_rates++] = basic_mask |
5364 IEEE80211_CCK_RATE_11MB;
5367 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5368 u8 modulation, u32 rate_mask)
5370 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5371 IEEE80211_BASIC_RATE_MASK : 0;
5373 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5374 rates->supported_rates[rates->num_rates++] = basic_mask |
5375 IEEE80211_OFDM_RATE_6MB;
5377 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5378 rates->supported_rates[rates->num_rates++] =
5379 IEEE80211_OFDM_RATE_9MB;
5381 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5382 rates->supported_rates[rates->num_rates++] = basic_mask |
5383 IEEE80211_OFDM_RATE_12MB;
5385 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5386 rates->supported_rates[rates->num_rates++] =
5387 IEEE80211_OFDM_RATE_18MB;
5389 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5390 rates->supported_rates[rates->num_rates++] = basic_mask |
5391 IEEE80211_OFDM_RATE_24MB;
5393 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5394 rates->supported_rates[rates->num_rates++] =
5395 IEEE80211_OFDM_RATE_36MB;
5397 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5398 rates->supported_rates[rates->num_rates++] =
5399 IEEE80211_OFDM_RATE_48MB;
5401 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5402 rates->supported_rates[rates->num_rates++] =
5403 IEEE80211_OFDM_RATE_54MB;
5406 struct ipw_network_match {
5407 struct ieee80211_network *network;
5408 struct ipw_supported_rates rates;
5411 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5412 struct ipw_network_match *match,
5413 struct ieee80211_network *network,
5414 int roaming)
5416 struct ipw_supported_rates rates;
5417 DECLARE_MAC_BUF(mac);
5418 DECLARE_MAC_BUF(mac2);
5420 /* Verify that this network's capability is compatible with the
5421 * current mode (AdHoc or Infrastructure) */
5422 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5423 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5424 IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to "
5425 "capability mismatch.\n",
5426 escape_essid(network->ssid, network->ssid_len),
5427 print_mac(mac, network->bssid));
5428 return 0;
5431 /* If we do not have an ESSID for this AP, we can not associate with
5432 * it */
5433 if (network->flags & NETWORK_EMPTY_ESSID) {
5434 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5435 "because of hidden ESSID.\n",
5436 escape_essid(network->ssid, network->ssid_len),
5437 print_mac(mac, network->bssid));
5438 return 0;
5441 if (unlikely(roaming)) {
5442 /* If we are roaming, then check whether this is a valid
5443 * network to try to roam to */
5444 if ((network->ssid_len != match->network->ssid_len) ||
5445 memcmp(network->ssid, match->network->ssid,
5446 network->ssid_len)) {
5447 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5448 "because of non-network ESSID.\n",
5449 escape_essid(network->ssid,
5450 network->ssid_len),
5451 print_mac(mac, network->bssid));
5452 return 0;
5454 } else {
5455 /* If an ESSID has been configured then compare the broadcast
5456 * ESSID to ours */
5457 if ((priv->config & CFG_STATIC_ESSID) &&
5458 ((network->ssid_len != priv->essid_len) ||
5459 memcmp(network->ssid, priv->essid,
5460 min(network->ssid_len, priv->essid_len)))) {
5461 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5463 strncpy(escaped,
5464 escape_essid(network->ssid, network->ssid_len),
5465 sizeof(escaped));
5466 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5467 "because of ESSID mismatch: '%s'.\n",
5468 escaped, print_mac(mac, network->bssid),
5469 escape_essid(priv->essid,
5470 priv->essid_len));
5471 return 0;
5475 /* If this network's TSF timestamp is behind the current match's, don't
5476 * bother testing everything else. */
5478 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5479 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5480 "current network.\n",
5481 escape_essid(match->network->ssid,
5482 match->network->ssid_len));
5483 return 0;
5484 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5485 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5486 "current network.\n",
5487 escape_essid(match->network->ssid,
5488 match->network->ssid_len));
5489 return 0;
5492 /* Now go through and see if the requested network is valid... */
5493 if (priv->ieee->scan_age != 0 &&
5494 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5495 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5496 "because of age: %ums.\n",
5497 escape_essid(network->ssid, network->ssid_len),
5498 print_mac(mac, network->bssid),
5499 jiffies_to_msecs(jiffies -
5500 network->last_scanned));
5501 return 0;
5504 if ((priv->config & CFG_STATIC_CHANNEL) &&
5505 (network->channel != priv->channel)) {
5506 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5507 "because of channel mismatch: %d != %d.\n",
5508 escape_essid(network->ssid, network->ssid_len),
5509 print_mac(mac, network->bssid),
5510 network->channel, priv->channel);
5511 return 0;
5514 /* Verify privacy compatibility */
5515 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5516 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5517 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5518 "because of privacy mismatch: %s != %s.\n",
5519 escape_essid(network->ssid, network->ssid_len),
5520 print_mac(mac, network->bssid),
5521 priv->capability & CAP_PRIVACY_ON ? "on" : "off",
5523 network->capability & WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5526 return 0;
5529 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5530 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5531 "because of the same BSSID match: %s"
5532 ".\n", escape_essid(network->ssid,
5533 network->ssid_len),
5534 print_mac(mac, network->bssid),
5535 print_mac(mac2, priv->bssid));
5536 return 0;
5539 /* Filter out any incompatible freq / mode combinations */
5540 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5541 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5542 "because of invalid frequency/mode "
5543 "combination.\n",
5544 escape_essid(network->ssid, network->ssid_len),
5545 print_mac(mac, network->bssid));
5546 return 0;
5549 /* Ensure that the rates supported by the driver are compatible with
5550 * this AP, including verification of basic rates (mandatory) */
5551 if (!ipw_compatible_rates(priv, network, &rates)) {
5552 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5553 "because configured rate mask excludes "
5554 "AP mandatory rate.\n",
5555 escape_essid(network->ssid, network->ssid_len),
5556 print_mac(mac, network->bssid));
5557 return 0;
5560 if (rates.num_rates == 0) {
5561 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5562 "because of no compatible rates.\n",
5563 escape_essid(network->ssid, network->ssid_len),
5564 print_mac(mac, network->bssid));
5565 return 0;
5568 /* TODO: Perform any further minimal comparative tests. We do not
5569 * want to put too much policy logic here; intelligent scan selection
5570 * should occur within a generic IEEE 802.11 user space tool. */
5572 /* Set up 'new' AP to this network */
5573 ipw_copy_rates(&match->rates, &rates);
5574 match->network = network;
5575 IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n",
5576 escape_essid(network->ssid, network->ssid_len),
5577 print_mac(mac, network->bssid));
5579 return 1;
5582 static void ipw_merge_adhoc_network(struct work_struct *work)
5584 struct ipw_priv *priv =
5585 container_of(work, struct ipw_priv, merge_networks);
5586 struct ieee80211_network *network = NULL;
5587 struct ipw_network_match match = {
5588 .network = priv->assoc_network
5591 if ((priv->status & STATUS_ASSOCIATED) &&
5592 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5593 /* First pass through ROAM process -- look for a better
5594 * network */
5595 unsigned long flags;
5597 spin_lock_irqsave(&priv->ieee->lock, flags);
5598 list_for_each_entry(network, &priv->ieee->network_list, list) {
5599 if (network != priv->assoc_network)
5600 ipw_find_adhoc_network(priv, &match, network, 1);
5603 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5605 if (match.network == priv->assoc_network) {
5606 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5607 "merge to.\n");
5608 return;
5611 mutex_lock(&priv->mutex);
5612 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5613 IPW_DEBUG_MERGE("remove network %s\n",
5614 escape_essid(priv->essid,
5615 priv->essid_len));
5616 ipw_remove_current_network(priv);
5619 ipw_disassociate(priv);
5620 priv->assoc_network = match.network;
5621 mutex_unlock(&priv->mutex);
5622 return;
5626 static int ipw_best_network(struct ipw_priv *priv,
5627 struct ipw_network_match *match,
5628 struct ieee80211_network *network, int roaming)
5630 struct ipw_supported_rates rates;
5631 DECLARE_MAC_BUF(mac);
5633 /* Verify that this network's capability is compatible with the
5634 * current mode (AdHoc or Infrastructure) */
5635 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5636 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5637 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5638 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5639 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to "
5640 "capability mismatch.\n",
5641 escape_essid(network->ssid, network->ssid_len),
5642 print_mac(mac, network->bssid));
5643 return 0;
5646 /* If we do not have an ESSID for this AP, we can not associate with
5647 * it */
5648 if (network->flags & NETWORK_EMPTY_ESSID) {
5649 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5650 "because of hidden ESSID.\n",
5651 escape_essid(network->ssid, network->ssid_len),
5652 print_mac(mac, network->bssid));
5653 return 0;
5656 if (unlikely(roaming)) {
5657 /* If we are roaming, then check whether this is a valid
5658 * network to try to roam to */
5659 if ((network->ssid_len != match->network->ssid_len) ||
5660 memcmp(network->ssid, match->network->ssid,
5661 network->ssid_len)) {
5662 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5663 "because of non-network ESSID.\n",
5664 escape_essid(network->ssid,
5665 network->ssid_len),
5666 print_mac(mac, network->bssid));
5667 return 0;
5669 } else {
5670 /* If an ESSID has been configured then compare the broadcast
5671 * ESSID to ours */
5672 if ((priv->config & CFG_STATIC_ESSID) &&
5673 ((network->ssid_len != priv->essid_len) ||
5674 memcmp(network->ssid, priv->essid,
5675 min(network->ssid_len, priv->essid_len)))) {
5676 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5677 strncpy(escaped,
5678 escape_essid(network->ssid, network->ssid_len),
5679 sizeof(escaped));
5680 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5681 "because of ESSID mismatch: '%s'.\n",
5682 escaped, print_mac(mac, network->bssid),
5683 escape_essid(priv->essid,
5684 priv->essid_len));
5685 return 0;
5689 /* If the currently matched network has a stronger signal than this one,
5690 * don't bother testing everything else. */
5691 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5692 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5693 strncpy(escaped,
5694 escape_essid(network->ssid, network->ssid_len),
5695 sizeof(escaped));
5696 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because "
5697 "'%s (%s)' has a stronger signal.\n",
5698 escaped, print_mac(mac, network->bssid),
5699 escape_essid(match->network->ssid,
5700 match->network->ssid_len),
5701 print_mac(mac, match->network->bssid));
5702 return 0;
5705 /* If this network has already had an association attempt within the
5706 * last 3 seconds, do not try and associate again... */
5707 if (network->last_associate &&
5708 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5709 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5710 "because of storming (%ums since last "
5711 "assoc attempt).\n",
5712 escape_essid(network->ssid, network->ssid_len),
5713 print_mac(mac, network->bssid),
5714 jiffies_to_msecs(jiffies -
5715 network->last_associate));
5716 return 0;
5719 /* Now go through and see if the requested network is valid... */
5720 if (priv->ieee->scan_age != 0 &&
5721 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5722 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5723 "because of age: %ums.\n",
5724 escape_essid(network->ssid, network->ssid_len),
5725 print_mac(mac, network->bssid),
5726 jiffies_to_msecs(jiffies -
5727 network->last_scanned));
5728 return 0;
5731 if ((priv->config & CFG_STATIC_CHANNEL) &&
5732 (network->channel != priv->channel)) {
5733 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5734 "because of channel mismatch: %d != %d.\n",
5735 escape_essid(network->ssid, network->ssid_len),
5736 print_mac(mac, network->bssid),
5737 network->channel, priv->channel);
5738 return 0;
5741 /* Verify privacy compatibility */
5742 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5743 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5744 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5745 "because of privacy mismatch: %s != %s.\n",
5746 escape_essid(network->ssid, network->ssid_len),
5747 print_mac(mac, network->bssid),
5748 priv->capability & CAP_PRIVACY_ON ? "on" :
5749 "off",
5750 network->capability &
5751 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5752 return 0;
5755 if ((priv->config & CFG_STATIC_BSSID) &&
5756 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5757 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5758 "because of BSSID mismatch: %s.\n",
5759 escape_essid(network->ssid, network->ssid_len),
5760 print_mac(mac, network->bssid), print_mac(mac, priv->bssid));
5761 return 0;
5764 /* Filter out any incompatible freq / mode combinations */
5765 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5766 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5767 "because of invalid frequency/mode "
5768 "combination.\n",
5769 escape_essid(network->ssid, network->ssid_len),
5770 print_mac(mac, network->bssid));
5771 return 0;
5774 /* Filter out invalid channel in current GEO */
5775 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5776 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5777 "because of invalid channel in current GEO\n",
5778 escape_essid(network->ssid, network->ssid_len),
5779 print_mac(mac, network->bssid));
5780 return 0;
5783 /* Ensure that the rates supported by the driver are compatible with
5784 * this AP, including verification of basic rates (mandatory) */
5785 if (!ipw_compatible_rates(priv, network, &rates)) {
5786 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5787 "because configured rate mask excludes "
5788 "AP mandatory rate.\n",
5789 escape_essid(network->ssid, network->ssid_len),
5790 print_mac(mac, network->bssid));
5791 return 0;
5794 if (rates.num_rates == 0) {
5795 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5796 "because of no compatible rates.\n",
5797 escape_essid(network->ssid, network->ssid_len),
5798 print_mac(mac, network->bssid));
5799 return 0;
5802 /* TODO: Perform any further minimal comparative tests. We do not
5803 * want to put too much policy logic here; intelligent scan selection
5804 * should occur within a generic IEEE 802.11 user space tool. */
5806 /* Set up 'new' AP to this network */
5807 ipw_copy_rates(&match->rates, &rates);
5808 match->network = network;
5810 IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n",
5811 escape_essid(network->ssid, network->ssid_len),
5812 print_mac(mac, network->bssid));
5814 return 1;
5817 static void ipw_adhoc_create(struct ipw_priv *priv,
5818 struct ieee80211_network *network)
5820 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5821 int i;
5824 * For the purposes of scanning, we can set our wireless mode
5825 * to trigger scans across combinations of bands, but when it
5826 * comes to creating a new ad-hoc network, we have to tell the FW
5827 * exactly which band to use.
5829 * We also have the possibility of an invalid channel for the
5830 * chosen band. Attempting to create a new ad-hoc network
5831 * with an invalid channel for wireless mode will trigger a
5832 * FW fatal error.
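/* For example, a priv->channel of 36 falls in the 5.2 GHz band and forces the
 * new IBSS to IEEE_A, while channel 6 falls in the 2.4 GHz band and selects
 * IEEE_G (or IEEE_B when G support is not enabled). */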
5835 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5836 case IEEE80211_52GHZ_BAND:
5837 network->mode = IEEE_A;
5838 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5839 BUG_ON(i == -1);
5840 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5841 IPW_WARNING("Overriding invalid channel\n");
5842 priv->channel = geo->a[0].channel;
5844 break;
5846 case IEEE80211_24GHZ_BAND:
5847 if (priv->ieee->mode & IEEE_G)
5848 network->mode = IEEE_G;
5849 else
5850 network->mode = IEEE_B;
5851 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5852 BUG_ON(i == -1);
5853 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5854 IPW_WARNING("Overriding invalid channel\n");
5855 priv->channel = geo->bg[0].channel;
5857 break;
5859 default:
5860 IPW_WARNING("Overriding invalid channel\n");
5861 if (priv->ieee->mode & IEEE_A) {
5862 network->mode = IEEE_A;
5863 priv->channel = geo->a[0].channel;
5864 } else if (priv->ieee->mode & IEEE_G) {
5865 network->mode = IEEE_G;
5866 priv->channel = geo->bg[0].channel;
5867 } else {
5868 network->mode = IEEE_B;
5869 priv->channel = geo->bg[0].channel;
5871 break;
5874 network->channel = priv->channel;
5875 priv->config |= CFG_ADHOC_PERSIST;
5876 ipw_create_bssid(priv, network->bssid);
5877 network->ssid_len = priv->essid_len;
5878 memcpy(network->ssid, priv->essid, priv->essid_len);
5879 memset(&network->stats, 0, sizeof(network->stats));
5880 network->capability = WLAN_CAPABILITY_IBSS;
5881 if (!(priv->config & CFG_PREAMBLE_LONG))
5882 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5883 if (priv->capability & CAP_PRIVACY_ON)
5884 network->capability |= WLAN_CAPABILITY_PRIVACY;
5885 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5886 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5887 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5888 memcpy(network->rates_ex,
5889 &priv->rates.supported_rates[network->rates_len],
5890 network->rates_ex_len);
5891 network->last_scanned = 0;
5892 network->flags = 0;
5893 network->last_associate = 0;
5894 network->time_stamp[0] = 0;
5895 network->time_stamp[1] = 0;
5896 network->beacon_interval = 100; /* Default */
5897 network->listen_interval = 10; /* Default */
5898 network->atim_window = 0; /* Default */
5899 network->wpa_ie_len = 0;
5900 network->rsn_ie_len = 0;
5903 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5905 struct ipw_tgi_tx_key key;
5907 if (!(priv->ieee->sec.flags & (1 << index)))
5908 return;
5910 key.key_id = index;
5911 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5912 key.security_type = type;
5913 key.station_index = 0; /* always 0 for BSS */
5914 key.flags = 0;
5915 /* 0 for new key; previous value of counter (after fatal error) */
5916 key.tx_counter[0] = cpu_to_le32(0);
5917 key.tx_counter[1] = cpu_to_le32(0);
5919 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5922 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5924 struct ipw_wep_key key;
5925 int i;
5927 key.cmd_id = DINO_CMD_WEP_KEY;
5928 key.seq_num = 0;
5930 /* Note: AES keys cannot be set more than once;
5931 * only set them the first time. */
5932 for (i = 0; i < 4; i++) {
5933 key.key_index = i | type;
5934 if (!(priv->ieee->sec.flags & (1 << i))) {
5935 key.key_size = 0;
5936 continue;
5939 key.key_size = priv->ieee->sec.key_sizes[i];
5940 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5942 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5946 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5948 if (priv->ieee->host_encrypt)
5949 return;
5951 switch (level) {
5952 case SEC_LEVEL_3:
5953 priv->sys_config.disable_unicast_decryption = 0;
5954 priv->ieee->host_decrypt = 0;
5955 break;
5956 case SEC_LEVEL_2:
5957 priv->sys_config.disable_unicast_decryption = 1;
5958 priv->ieee->host_decrypt = 1;
5959 break;
5960 case SEC_LEVEL_1:
5961 priv->sys_config.disable_unicast_decryption = 0;
5962 priv->ieee->host_decrypt = 0;
5963 break;
5964 case SEC_LEVEL_0:
5965 priv->sys_config.disable_unicast_decryption = 1;
5966 break;
5967 default:
5968 break;
5972 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5974 if (priv->ieee->host_encrypt)
5975 return;
5977 switch (level) {
5978 case SEC_LEVEL_3:
5979 priv->sys_config.disable_multicast_decryption = 0;
5980 break;
5981 case SEC_LEVEL_2:
5982 priv->sys_config.disable_multicast_decryption = 1;
5983 break;
5984 case SEC_LEVEL_1:
5985 priv->sys_config.disable_multicast_decryption = 0;
5986 break;
5987 case SEC_LEVEL_0:
5988 priv->sys_config.disable_multicast_decryption = 1;
5989 break;
5990 default:
5991 break;
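/* Note on the SEC_LEVEL handling above and in ipw_set_hwcrypto_keys()
 * below: level 3 (CCMP) and level 1 (WEP) leave decryption to the
 * firmware, while level 2 (TKIP) keeps host decryption enabled because
 * the hardware cannot handle the Michael MIC (cf. ipw_wx_set_encodeext).
 * For level 3 the active pairwise key is sent with IPW_CMD_TGI_TX_KEY,
 * and the configured key slots are also downloaded via the WEP key
 * command with the CCM security type when multicast decryption is done
 * in hardware. */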
5995 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5997 switch (priv->ieee->sec.level) {
5998 case SEC_LEVEL_3:
5999 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6000 ipw_send_tgi_tx_key(priv,
6001 DCT_FLAG_EXT_SECURITY_CCM,
6002 priv->ieee->sec.active_key);
6004 if (!priv->ieee->host_mc_decrypt)
6005 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6006 break;
6007 case SEC_LEVEL_2:
6008 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6009 ipw_send_tgi_tx_key(priv,
6010 DCT_FLAG_EXT_SECURITY_TKIP,
6011 priv->ieee->sec.active_key);
6012 break;
6013 case SEC_LEVEL_1:
6014 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6015 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6016 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6017 break;
6018 case SEC_LEVEL_0:
6019 default:
6020 break;
6024 static void ipw_adhoc_check(void *data)
6026 struct ipw_priv *priv = data;
6028 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6029 !(priv->config & CFG_ADHOC_PERSIST)) {
6030 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6031 IPW_DL_STATE | IPW_DL_ASSOC,
6032 "Missed beacon: %d - disassociate\n",
6033 priv->missed_adhoc_beacons);
6034 ipw_remove_current_network(priv);
6035 ipw_disassociate(priv);
6036 return;
6039 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6040 priv->assoc_request.beacon_interval);
6043 static void ipw_bg_adhoc_check(struct work_struct *work)
6045 struct ipw_priv *priv =
6046 container_of(work, struct ipw_priv, adhoc_check.work);
6047 mutex_lock(&priv->mutex);
6048 ipw_adhoc_check(priv);
6049 mutex_unlock(&priv->mutex);
6052 static void ipw_debug_config(struct ipw_priv *priv)
6054 DECLARE_MAC_BUF(mac);
6055 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6056 "[CFG 0x%08X]\n", priv->config);
6057 if (priv->config & CFG_STATIC_CHANNEL)
6058 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6059 else
6060 IPW_DEBUG_INFO("Channel unlocked.\n");
6061 if (priv->config & CFG_STATIC_ESSID)
6062 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6063 escape_essid(priv->essid, priv->essid_len));
6064 else
6065 IPW_DEBUG_INFO("ESSID unlocked.\n");
6066 if (priv->config & CFG_STATIC_BSSID)
6067 IPW_DEBUG_INFO("BSSID locked to %s\n",
6068 print_mac(mac, priv->bssid));
6069 else
6070 IPW_DEBUG_INFO("BSSID unlocked.\n");
6071 if (priv->capability & CAP_PRIVACY_ON)
6072 IPW_DEBUG_INFO("PRIVACY on\n");
6073 else
6074 IPW_DEBUG_INFO("PRIVACY off\n");
6075 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
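/* Note on ipw_set_fixed_rate() below: the user-supplied rates_mask is
 * rewritten into the per-band layout the firmware expects.  In A mode
 * the OFDM bits are shifted down by IEEE80211_OFDM_SHIFT_MASK_A; in G
 * mode the 6/9/12 Mb/s OFDM bits are each folded down one position
 * (presumably to match the firmware's G-mode rate table); an invalid
 * mask is replaced with 0, i.e. no fixed rate.  The result is written
 * via ipw_write_reg32() to the address read from IPW_MEM_FIXED_OVERRIDE. */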
6078 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6080 /* TODO: Verify that this works... */
6081 struct ipw_fixed_rate fr = {
6082 .tx_rates = priv->rates_mask
6084 u32 reg;
6085 u16 mask = 0;
6087 /* Identify 'current FW band' and match it with the fixed
6088 * Tx rates */
6090 switch (priv->ieee->freq_band) {
6091 case IEEE80211_52GHZ_BAND: /* A only */
6092 /* IEEE_A */
6093 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6094 /* Invalid fixed rate mask */
6095 IPW_DEBUG_WX
6096 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6097 fr.tx_rates = 0;
6098 break;
6101 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6102 break;
6104 default: /* 2.4Ghz or Mixed */
6105 /* IEEE_B */
6106 if (mode == IEEE_B) {
6107 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6108 /* Invalid fixed rate mask */
6109 IPW_DEBUG_WX
6110 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6111 fr.tx_rates = 0;
6113 break;
6116 /* IEEE_G */
6117 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6118 IEEE80211_OFDM_RATES_MASK)) {
6119 /* Invalid fixed rate mask */
6120 IPW_DEBUG_WX
6121 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6122 fr.tx_rates = 0;
6123 break;
6126 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6127 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6128 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6131 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6132 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6133 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6136 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6137 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6138 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6141 fr.tx_rates |= mask;
6142 break;
6145 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6146 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6149 static void ipw_abort_scan(struct ipw_priv *priv)
6151 int err;
6153 if (priv->status & STATUS_SCAN_ABORTING) {
6154 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6155 return;
6157 priv->status |= STATUS_SCAN_ABORTING;
6159 err = ipw_send_scan_abort(priv);
6160 if (err)
6161 IPW_DEBUG_HC("Request to abort scan failed.\n");
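/* Note on the channel list built by ipw_add_scan_channels() below: each
 * band is encoded as a marker byte, (band mode << 6) | channel-count,
 * followed by that many channel numbers, e.g. for two 5.2GHz and three
 * 2.4GHz channels:
 *
 *   channels_list = { (IPW_A_MODE << 6) | 2, 36, 40,
 *                     (IPW_B_MODE << 6) | 3, 1, 6, 11 };
 *
 * Channels flagged IEEE80211_CH_PASSIVE_ONLY are forced to a passive
 * full-dwell scan, and the channel we are currently associated on is
 * skipped. */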
6164 static void ipw_add_scan_channels(struct ipw_priv *priv,
6165 struct ipw_scan_request_ext *scan,
6166 int scan_type)
6168 int channel_index = 0;
6169 const struct ieee80211_geo *geo;
6170 int i;
6172 geo = ieee80211_get_geo(priv->ieee);
6174 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6175 int start = channel_index;
6176 for (i = 0; i < geo->a_channels; i++) {
6177 if ((priv->status & STATUS_ASSOCIATED) &&
6178 geo->a[i].channel == priv->channel)
6179 continue;
6180 channel_index++;
6181 scan->channels_list[channel_index] = geo->a[i].channel;
6182 ipw_set_scan_type(scan, channel_index,
6183 geo->a[i].
6184 flags & IEEE80211_CH_PASSIVE_ONLY ?
6185 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6186 scan_type);
6189 if (start != channel_index) {
6190 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6191 (channel_index - start);
6192 channel_index++;
6196 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6197 int start = channel_index;
6198 if (priv->config & CFG_SPEED_SCAN) {
6199 int index;
6200 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6201 /* nop out the list */
6202 [0] = 0
6205 u8 channel;
6206 while (channel_index < IPW_SCAN_CHANNELS) {
6207 channel =
6208 priv->speed_scan[priv->speed_scan_pos];
6209 if (channel == 0) {
6210 priv->speed_scan_pos = 0;
6211 channel = priv->speed_scan[0];
6213 if ((priv->status & STATUS_ASSOCIATED) &&
6214 channel == priv->channel) {
6215 priv->speed_scan_pos++;
6216 continue;
6219 /* If this channel has already been
6220 * added in scan, break from loop
6221 * and this will be the first channel
6222 * in the next scan.
6224 if (channels[channel - 1] != 0)
6225 break;
6227 channels[channel - 1] = 1;
6228 priv->speed_scan_pos++;
6229 channel_index++;
6230 scan->channels_list[channel_index] = channel;
6231 index =
6232 ieee80211_channel_to_index(priv->ieee, channel);
6233 ipw_set_scan_type(scan, channel_index,
6234 geo->bg[index].
6235 flags &
6236 IEEE80211_CH_PASSIVE_ONLY ?
6237 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6238 : scan_type);
6240 } else {
6241 for (i = 0; i < geo->bg_channels; i++) {
6242 if ((priv->status & STATUS_ASSOCIATED) &&
6243 geo->bg[i].channel == priv->channel)
6244 continue;
6245 channel_index++;
6246 scan->channels_list[channel_index] =
6247 geo->bg[i].channel;
6248 ipw_set_scan_type(scan, channel_index,
6249 geo->bg[i].
6250 flags &
6251 IEEE80211_CH_PASSIVE_ONLY ?
6252 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6253 : scan_type);
6257 if (start != channel_index) {
6258 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6259 (channel_index - start);
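/* Note on ipw_request_scan_helper() below: the request is deferred by
 * setting STATUS_SCAN_PENDING when a scan is already running, when a
 * scan abort is still pending (unless STATUS_SCAN_FORCED is set), or
 * when RF-kill is active.  Once the request is actually sent,
 * STATUS_SCANNING is set, the pending flag is cleared and the
 * scan_check watchdog is queued, presumably to recover from a scan the
 * firmware never completes. */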
6264 static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6266 struct ipw_scan_request_ext scan;
6267 int err = 0, scan_type;
6269 if (!(priv->status & STATUS_INIT) ||
6270 (priv->status & STATUS_EXIT_PENDING))
6271 return 0;
6273 mutex_lock(&priv->mutex);
6275 if (priv->status & STATUS_SCANNING) {
6276 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6277 priv->status |= STATUS_SCAN_PENDING;
6278 goto done;
6281 if (!(priv->status & STATUS_SCAN_FORCED) &&
6282 priv->status & STATUS_SCAN_ABORTING) {
6283 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6284 priv->status |= STATUS_SCAN_PENDING;
6285 goto done;
6288 if (priv->status & STATUS_RF_KILL_MASK) {
6289 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6290 priv->status |= STATUS_SCAN_PENDING;
6291 goto done;
6294 memset(&scan, 0, sizeof(scan));
6295 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6297 if (type == IW_SCAN_TYPE_PASSIVE) {
6298 IPW_DEBUG_WX("use passive scanning\n");
6299 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6300 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6301 cpu_to_le16(120);
6302 ipw_add_scan_channels(priv, &scan, scan_type);
6303 goto send_request;
6306 /* Use active scan by default. */
6307 if (priv->config & CFG_SPEED_SCAN)
6308 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6309 cpu_to_le16(30);
6310 else
6311 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6312 cpu_to_le16(20);
6314 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6315 cpu_to_le16(20);
6317 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6319 #ifdef CONFIG_IPW2200_MONITOR
6320 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6321 u8 channel;
6322 u8 band = 0;
6324 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6325 case IEEE80211_52GHZ_BAND:
6326 band = (u8) (IPW_A_MODE << 6) | 1;
6327 channel = priv->channel;
6328 break;
6330 case IEEE80211_24GHZ_BAND:
6331 band = (u8) (IPW_B_MODE << 6) | 1;
6332 channel = priv->channel;
6333 break;
6335 default:
6336 band = (u8) (IPW_B_MODE << 6) | 1;
6337 channel = 9;
6338 break;
6341 scan.channels_list[0] = band;
6342 scan.channels_list[1] = channel;
6343 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6345 /* NOTE: The card will sit on this channel for this time
6346 * period. Scan aborts are timing sensitive and frequently
6347 * result in firmware restarts. As such, it is best to
6348 * set a small dwell_time here and just keep re-issuing
6349 * scans. Otherwise fast channel hopping will not actually
6350 * hop channels.
6352 * TODO: Move SPEED SCAN support to all modes and bands */
6353 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6354 cpu_to_le16(2000);
6355 } else {
6356 #endif /* CONFIG_IPW2200_MONITOR */
6357 /* If we are roaming, then make this a directed scan for the
6358 * current network. Otherwise, ensure that every other scan
6359 * is a fast channel hop scan */
6360 if ((priv->status & STATUS_ROAMING)
6361 || (!(priv->status & STATUS_ASSOCIATED)
6362 && (priv->config & CFG_STATIC_ESSID)
6363 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6364 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6365 if (err) {
6366 IPW_DEBUG_HC("Attempt to send SSID command "
6367 "failed.\n");
6368 goto done;
6371 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6372 } else
6373 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6375 ipw_add_scan_channels(priv, &scan, scan_type);
6376 #ifdef CONFIG_IPW2200_MONITOR
6378 #endif
6380 send_request:
6381 err = ipw_send_scan_request_ext(priv, &scan);
6382 if (err) {
6383 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6384 goto done;
6387 priv->status |= STATUS_SCANNING;
6388 priv->status &= ~STATUS_SCAN_PENDING;
6389 queue_delayed_work(priv->workqueue, &priv->scan_check,
6390 IPW_SCAN_CHECK_WATCHDOG);
6391 done:
6392 mutex_unlock(&priv->mutex);
6393 return err;
6396 static void ipw_request_passive_scan(struct work_struct *work)
6398 struct ipw_priv *priv =
6399 container_of(work, struct ipw_priv, request_passive_scan);
6400 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6403 static void ipw_request_scan(struct work_struct *work)
6405 struct ipw_priv *priv =
6406 container_of(work, struct ipw_priv, request_scan.work);
6407 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6410 static void ipw_bg_abort_scan(struct work_struct *work)
6412 struct ipw_priv *priv =
6413 container_of(work, struct ipw_priv, abort_scan);
6414 mutex_lock(&priv->mutex);
6415 ipw_abort_scan(priv);
6416 mutex_unlock(&priv->mutex);
6419 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6421 /* This is called when wpa_supplicant loads and closes the driver
6422 * interface. */
6423 priv->ieee->wpa_enabled = value;
6424 return 0;
6427 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6429 struct ieee80211_device *ieee = priv->ieee;
6430 struct ieee80211_security sec = {
6431 .flags = SEC_AUTH_MODE,
6433 int ret = 0;
6435 if (value & IW_AUTH_ALG_SHARED_KEY) {
6436 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6437 ieee->open_wep = 0;
6438 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6439 sec.auth_mode = WLAN_AUTH_OPEN;
6440 ieee->open_wep = 1;
6441 } else if (value & IW_AUTH_ALG_LEAP) {
6442 sec.auth_mode = WLAN_AUTH_LEAP;
6443 ieee->open_wep = 1;
6444 } else
6445 return -EINVAL;
6447 if (ieee->set_security)
6448 ieee->set_security(ieee->dev, &sec);
6449 else
6450 ret = -EOPNOTSUPP;
6452 return ret;
6455 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6456 int wpa_ie_len)
6458 /* make sure WPA is enabled */
6459 ipw_wpa_enable(priv, 1);
6462 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6463 char *capabilities, int length)
6465 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6467 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6468 capabilities);
6472 * WE-18 support
6475 /* SIOCSIWGENIE */
6476 static int ipw_wx_set_genie(struct net_device *dev,
6477 struct iw_request_info *info,
6478 union iwreq_data *wrqu, char *extra)
6480 struct ipw_priv *priv = ieee80211_priv(dev);
6481 struct ieee80211_device *ieee = priv->ieee;
6482 u8 *buf;
6483 int err = 0;
6485 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6486 (wrqu->data.length && extra == NULL))
6487 return -EINVAL;
6489 if (wrqu->data.length) {
6490 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6491 if (buf == NULL) {
6492 err = -ENOMEM;
6493 goto out;
6496 memcpy(buf, extra, wrqu->data.length);
6497 kfree(ieee->wpa_ie);
6498 ieee->wpa_ie = buf;
6499 ieee->wpa_ie_len = wrqu->data.length;
6500 } else {
6501 kfree(ieee->wpa_ie);
6502 ieee->wpa_ie = NULL;
6503 ieee->wpa_ie_len = 0;
6506 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6507 out:
6508 return err;
6511 /* SIOCGIWGENIE */
6512 static int ipw_wx_get_genie(struct net_device *dev,
6513 struct iw_request_info *info,
6514 union iwreq_data *wrqu, char *extra)
6516 struct ipw_priv *priv = ieee80211_priv(dev);
6517 struct ieee80211_device *ieee = priv->ieee;
6518 int err = 0;
6520 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6521 wrqu->data.length = 0;
6522 goto out;
6525 if (wrqu->data.length < ieee->wpa_ie_len) {
6526 err = -E2BIG;
6527 goto out;
6530 wrqu->data.length = ieee->wpa_ie_len;
6531 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6533 out:
6534 return err;
6537 static int wext_cipher2level(int cipher)
6539 switch (cipher) {
6540 case IW_AUTH_CIPHER_NONE:
6541 return SEC_LEVEL_0;
6542 case IW_AUTH_CIPHER_WEP40:
6543 case IW_AUTH_CIPHER_WEP104:
6544 return SEC_LEVEL_1;
6545 case IW_AUTH_CIPHER_TKIP:
6546 return SEC_LEVEL_2;
6547 case IW_AUTH_CIPHER_CCMP:
6548 return SEC_LEVEL_3;
6549 default:
6550 return -1;
6554 /* SIOCSIWAUTH */
6555 static int ipw_wx_set_auth(struct net_device *dev,
6556 struct iw_request_info *info,
6557 union iwreq_data *wrqu, char *extra)
6559 struct ipw_priv *priv = ieee80211_priv(dev);
6560 struct ieee80211_device *ieee = priv->ieee;
6561 struct iw_param *param = &wrqu->param;
6562 struct ieee80211_crypt_data *crypt;
6563 unsigned long flags;
6564 int ret = 0;
6566 switch (param->flags & IW_AUTH_INDEX) {
6567 case IW_AUTH_WPA_VERSION:
6568 break;
6569 case IW_AUTH_CIPHER_PAIRWISE:
6570 ipw_set_hw_decrypt_unicast(priv,
6571 wext_cipher2level(param->value));
6572 break;
6573 case IW_AUTH_CIPHER_GROUP:
6574 ipw_set_hw_decrypt_multicast(priv,
6575 wext_cipher2level(param->value));
6576 break;
6577 case IW_AUTH_KEY_MGMT:
6579 * ipw2200 does not use these parameters
6581 break;
6583 case IW_AUTH_TKIP_COUNTERMEASURES:
6584 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6585 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6586 break;
6588 flags = crypt->ops->get_flags(crypt->priv);
6590 if (param->value)
6591 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6592 else
6593 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6595 crypt->ops->set_flags(flags, crypt->priv);
6597 break;
6599 case IW_AUTH_DROP_UNENCRYPTED:{
6600 /* HACK:
6602 * wpa_supplicant calls set_wpa_enabled when the driver
6603 * is loaded and unloaded, regardless of whether WPA is being
6604 * used. No other calls are made that could be used to
6605 * determine whether encryption will be used
6606 * prior to association. If encryption is not being
6607 * used, drop_unencrypted is set to false, else true -- we
6608 * can use this to determine if the CAP_PRIVACY_ON bit should
6609 * be set.
6611 struct ieee80211_security sec = {
6612 .flags = SEC_ENABLED,
6613 .enabled = param->value,
6615 priv->ieee->drop_unencrypted = param->value;
6616 /* We only change SEC_LEVEL for open mode. Others
6617 * are set by ipw_wpa_set_encryption.
6619 if (!param->value) {
6620 sec.flags |= SEC_LEVEL;
6621 sec.level = SEC_LEVEL_0;
6622 } else {
6623 sec.flags |= SEC_LEVEL;
6624 sec.level = SEC_LEVEL_1;
6626 if (priv->ieee->set_security)
6627 priv->ieee->set_security(priv->ieee->dev, &sec);
6628 break;
6631 case IW_AUTH_80211_AUTH_ALG:
6632 ret = ipw_wpa_set_auth_algs(priv, param->value);
6633 break;
6635 case IW_AUTH_WPA_ENABLED:
6636 ret = ipw_wpa_enable(priv, param->value);
6637 ipw_disassociate(priv);
6638 break;
6640 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6641 ieee->ieee802_1x = param->value;
6642 break;
6644 case IW_AUTH_PRIVACY_INVOKED:
6645 ieee->privacy_invoked = param->value;
6646 break;
6648 default:
6649 return -EOPNOTSUPP;
6651 return ret;
6654 /* SIOCGIWAUTH */
6655 static int ipw_wx_get_auth(struct net_device *dev,
6656 struct iw_request_info *info,
6657 union iwreq_data *wrqu, char *extra)
6659 struct ipw_priv *priv = ieee80211_priv(dev);
6660 struct ieee80211_device *ieee = priv->ieee;
6661 struct ieee80211_crypt_data *crypt;
6662 struct iw_param *param = &wrqu->param;
6663 int ret = 0;
6665 switch (param->flags & IW_AUTH_INDEX) {
6666 case IW_AUTH_WPA_VERSION:
6667 case IW_AUTH_CIPHER_PAIRWISE:
6668 case IW_AUTH_CIPHER_GROUP:
6669 case IW_AUTH_KEY_MGMT:
6671 * wpa_supplicant will control these internally
6673 ret = -EOPNOTSUPP;
6674 break;
6676 case IW_AUTH_TKIP_COUNTERMEASURES:
6677 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6678 if (!crypt || !crypt->ops->get_flags)
6679 break;
6681 param->value = (crypt->ops->get_flags(crypt->priv) &
6682 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6684 break;
6686 case IW_AUTH_DROP_UNENCRYPTED:
6687 param->value = ieee->drop_unencrypted;
6688 break;
6690 case IW_AUTH_80211_AUTH_ALG:
6691 param->value = ieee->sec.auth_mode;
6692 break;
6694 case IW_AUTH_WPA_ENABLED:
6695 param->value = ieee->wpa_enabled;
6696 break;
6698 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6699 param->value = ieee->ieee802_1x;
6700 break;
6702 case IW_AUTH_ROAMING_CONTROL:
6703 case IW_AUTH_PRIVACY_INVOKED:
6704 param->value = ieee->privacy_invoked;
6705 break;
6707 default:
6708 return -EOPNOTSUPP;
6710 return 0;
6713 /* SIOCSIWENCODEEXT */
6714 static int ipw_wx_set_encodeext(struct net_device *dev,
6715 struct iw_request_info *info,
6716 union iwreq_data *wrqu, char *extra)
6718 struct ipw_priv *priv = ieee80211_priv(dev);
6719 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6721 if (hwcrypto) {
6722 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6723 /* IPW HW can't build TKIP MIC,
6724 host decryption still needed */
6725 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6726 priv->ieee->host_mc_decrypt = 1;
6727 else {
6728 priv->ieee->host_encrypt = 0;
6729 priv->ieee->host_encrypt_msdu = 1;
6730 priv->ieee->host_decrypt = 1;
6732 } else {
6733 priv->ieee->host_encrypt = 0;
6734 priv->ieee->host_encrypt_msdu = 0;
6735 priv->ieee->host_decrypt = 0;
6736 priv->ieee->host_mc_decrypt = 0;
6740 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6743 /* SIOCGIWENCODEEXT */
6744 static int ipw_wx_get_encodeext(struct net_device *dev,
6745 struct iw_request_info *info,
6746 union iwreq_data *wrqu, char *extra)
6748 struct ipw_priv *priv = ieee80211_priv(dev);
6749 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6752 /* SIOCSIWMLME */
6753 static int ipw_wx_set_mlme(struct net_device *dev,
6754 struct iw_request_info *info,
6755 union iwreq_data *wrqu, char *extra)
6757 struct ipw_priv *priv = ieee80211_priv(dev);
6758 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6759 u16 reason;
6761 reason = cpu_to_le16(mlme->reason_code);
6763 switch (mlme->cmd) {
6764 case IW_MLME_DEAUTH:
6765 /* silently ignore */
6766 break;
6768 case IW_MLME_DISASSOC:
6769 ipw_disassociate(priv);
6770 break;
6772 default:
6773 return -EOPNOTSUPP;
6775 return 0;
6778 #ifdef CONFIG_IPW2200_QOS
6780 /* QoS */
6782 * get the modulation type of the current network or
6783 * the card's current mode
6785 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6787 u8 mode = 0;
6789 if (priv->status & STATUS_ASSOCIATED) {
6790 unsigned long flags;
6792 spin_lock_irqsave(&priv->ieee->lock, flags);
6793 mode = priv->assoc_network->mode;
6794 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6795 } else {
6796 mode = priv->ieee->mode;
6798 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6799 return mode;
6803 * Handle management frame beacon and probe response
6805 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6806 int active_network,
6807 struct ieee80211_network *network)
6809 u32 size = sizeof(struct ieee80211_qos_parameters);
6811 if (network->capability & WLAN_CAPABILITY_IBSS)
6812 network->qos_data.active = network->qos_data.supported;
6814 if (network->flags & NETWORK_HAS_QOS_MASK) {
6815 if (active_network &&
6816 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6817 network->qos_data.active = network->qos_data.supported;
6819 if ((network->qos_data.active == 1) && (active_network == 1) &&
6820 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6821 (network->qos_data.old_param_count !=
6822 network->qos_data.param_count)) {
6823 network->qos_data.old_param_count =
6824 network->qos_data.param_count;
6825 schedule_work(&priv->qos_activate);
6826 IPW_DEBUG_QOS("QoS parameters change call "
6827 "qos_activate\n");
6829 } else {
6830 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6831 memcpy(&network->qos_data.parameters,
6832 &def_parameters_CCK, size);
6833 else
6834 memcpy(&network->qos_data.parameters,
6835 &def_parameters_OFDM, size);
6837 if ((network->qos_data.active == 1) && (active_network == 1)) {
6838 IPW_DEBUG_QOS("QoS was disabled, calling qos_activate\n");
6839 schedule_work(&priv->qos_activate);
6842 network->qos_data.active = 0;
6843 network->qos_data.supported = 0;
6845 if ((priv->status & STATUS_ASSOCIATED) &&
6846 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6847 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6848 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6849 !(network->flags & NETWORK_EMPTY_ESSID))
6850 if ((network->ssid_len ==
6851 priv->assoc_network->ssid_len) &&
6852 !memcmp(network->ssid,
6853 priv->assoc_network->ssid,
6854 network->ssid_len)) {
6855 queue_work(priv->workqueue,
6856 &priv->merge_networks);
6860 return 0;
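/* Note on ipw_qos_activate() below: it assembles three parameter sets,
 * QOS_PARAM_SET_DEF_CCK, QOS_PARAM_SET_DEF_OFDM and QOS_PARAM_SET_ACTIVE.
 * The active set comes from the compile-time defaults, the driver's IBSS
 * configuration, or the parameters advertised by the current network;
 * when no network QoS data is available, or the network does not support
 * QoS, tx_op_limit is overridden with the configured burst duration.
 * Everything is converted to little endian and sent as
 * IPW_CMD_QOS_PARAMETERS. */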
6864 * This function sets up the firmware to support QoS. It sends
6865 * IPW_CMD_QOS_PARAMETERS (IPW_CMD_WME_INFO is sent separately)
6867 static int ipw_qos_activate(struct ipw_priv *priv,
6868 struct ieee80211_qos_data *qos_network_data)
6870 int err;
6871 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6872 struct ieee80211_qos_parameters *active_one = NULL;
6873 u32 size = sizeof(struct ieee80211_qos_parameters);
6874 u32 burst_duration;
6875 int i;
6876 u8 type;
6878 type = ipw_qos_current_mode(priv);
6880 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6881 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6882 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6883 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6885 if (qos_network_data == NULL) {
6886 if (type == IEEE_B) {
6887 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6888 active_one = &def_parameters_CCK;
6889 } else
6890 active_one = &def_parameters_OFDM;
6892 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6893 burst_duration = ipw_qos_get_burst_duration(priv);
6894 for (i = 0; i < QOS_QUEUE_NUM; i++)
6895 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6896 (u16)burst_duration;
6897 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6898 if (type == IEEE_B) {
6899 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6900 type);
6901 if (priv->qos_data.qos_enable == 0)
6902 active_one = &def_parameters_CCK;
6903 else
6904 active_one = priv->qos_data.def_qos_parm_CCK;
6905 } else {
6906 if (priv->qos_data.qos_enable == 0)
6907 active_one = &def_parameters_OFDM;
6908 else
6909 active_one = priv->qos_data.def_qos_parm_OFDM;
6911 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6912 } else {
6913 unsigned long flags;
6914 int active;
6916 spin_lock_irqsave(&priv->ieee->lock, flags);
6917 active_one = &(qos_network_data->parameters);
6918 qos_network_data->old_param_count =
6919 qos_network_data->param_count;
6920 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6921 active = qos_network_data->supported;
6922 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6924 if (active == 0) {
6925 burst_duration = ipw_qos_get_burst_duration(priv);
6926 for (i = 0; i < QOS_QUEUE_NUM; i++)
6927 qos_parameters[QOS_PARAM_SET_ACTIVE].
6928 tx_op_limit[i] = (u16)burst_duration;
6932 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6933 for (i = 0; i < 3; i++) {
6934 int j;
6935 for (j = 0; j < QOS_QUEUE_NUM; j++) {
6936 qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6937 qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6938 qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6942 err = ipw_send_qos_params_command(priv,
6943 (struct ieee80211_qos_parameters *)
6944 &(qos_parameters[0]));
6945 if (err)
6946 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6948 return err;
6952 * send IPW_CMD_WME_INFO to the firmware
6954 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6956 int ret = 0;
6957 struct ieee80211_qos_information_element qos_info;
6959 if (priv == NULL)
6960 return -1;
6962 qos_info.elementID = QOS_ELEMENT_ID;
6963 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6965 qos_info.version = QOS_VERSION_1;
6966 qos_info.ac_info = 0;
6968 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6969 qos_info.qui_type = QOS_OUI_TYPE;
6970 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6972 ret = ipw_send_qos_info_command(priv, &qos_info);
6973 if (ret != 0) {
6974 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6976 return ret;
6980 * Set the QoS parameter with the association request structure
6982 static int ipw_qos_association(struct ipw_priv *priv,
6983 struct ieee80211_network *network)
6985 int err = 0;
6986 struct ieee80211_qos_data *qos_data = NULL;
6987 struct ieee80211_qos_data ibss_data = {
6988 .supported = 1,
6989 .active = 1,
6992 switch (priv->ieee->iw_mode) {
6993 case IW_MODE_ADHOC:
6994 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6996 qos_data = &ibss_data;
6997 break;
6999 case IW_MODE_INFRA:
7000 qos_data = &network->qos_data;
7001 break;
7003 default:
7004 BUG();
7005 break;
7008 err = ipw_qos_activate(priv, qos_data);
7009 if (err) {
7010 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7011 return err;
7014 if (priv->qos_data.qos_enable && qos_data->supported) {
7015 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7016 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7017 return ipw_qos_set_info_element(priv);
7020 return 0;
7024 * Handle beacon responses. If the QoS settings we receive
7025 * from the network differ from the settings used at association,
7026 * adjust the QoS settings
7028 static int ipw_qos_association_resp(struct ipw_priv *priv,
7029 struct ieee80211_network *network)
7031 int ret = 0;
7032 unsigned long flags;
7033 u32 size = sizeof(struct ieee80211_qos_parameters);
7034 int set_qos_param = 0;
7036 if ((priv == NULL) || (network == NULL) ||
7037 (priv->assoc_network == NULL))
7038 return ret;
7040 if (!(priv->status & STATUS_ASSOCIATED))
7041 return ret;
7043 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7044 return ret;
7046 spin_lock_irqsave(&priv->ieee->lock, flags);
7047 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7048 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7049 sizeof(struct ieee80211_qos_data));
7050 priv->assoc_network->qos_data.active = 1;
7051 if ((network->qos_data.old_param_count !=
7052 network->qos_data.param_count)) {
7053 set_qos_param = 1;
7054 network->qos_data.old_param_count =
7055 network->qos_data.param_count;
7058 } else {
7059 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7060 memcpy(&priv->assoc_network->qos_data.parameters,
7061 &def_parameters_CCK, size);
7062 else
7063 memcpy(&priv->assoc_network->qos_data.parameters,
7064 &def_parameters_OFDM, size);
7065 priv->assoc_network->qos_data.active = 0;
7066 priv->assoc_network->qos_data.supported = 0;
7067 set_qos_param = 1;
7070 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7072 if (set_qos_param == 1)
7073 schedule_work(&priv->qos_activate);
7075 return ret;
7078 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7080 u32 ret = 0;
7082 if ((priv == NULL))
7083 return 0;
7085 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7086 ret = priv->qos_data.burst_duration_CCK;
7087 else
7088 ret = priv->qos_data.burst_duration_OFDM;
7090 return ret;
7094 * Initialize the global QoS settings
7096 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7097 int burst_enable, u32 burst_duration_CCK,
7098 u32 burst_duration_OFDM)
7100 priv->qos_data.qos_enable = enable;
7102 if (priv->qos_data.qos_enable) {
7103 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7104 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7105 IPW_DEBUG_QOS("QoS is enabled\n");
7106 } else {
7107 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7108 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7109 IPW_DEBUG_QOS("QoS is not enabled\n");
7112 priv->qos_data.burst_enable = burst_enable;
7114 if (burst_enable) {
7115 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7116 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7117 } else {
7118 priv->qos_data.burst_duration_CCK = 0;
7119 priv->qos_data.burst_duration_OFDM = 0;
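/* Note on ipw_get_tx_queue_number() below: the 802.11e user priority
 * (0-7) is mapped through from_priority_to_tx_queue[] onto the firmware
 * TX queues; the table values appear to be 1-based, hence the "- 1".
 * Out-of-range priorities, or any priority while QoS is disabled, fall
 * back to priority 0. */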
7124 * map the packet priority to the right TX Queue
7126 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7128 if (priority > 7 || !priv->qos_data.qos_enable)
7129 priority = 0;
7131 return from_priority_to_tx_queue[priority] - 1;
7134 static int ipw_is_qos_active(struct net_device *dev,
7135 struct sk_buff *skb)
7137 struct ipw_priv *priv = ieee80211_priv(dev);
7138 struct ieee80211_qos_data *qos_data = NULL;
7139 int active, supported;
7140 u8 *daddr = skb->data + ETH_ALEN;
7141 int unicast = !is_multicast_ether_addr(daddr);
7143 if (!(priv->status & STATUS_ASSOCIATED))
7144 return 0;
7146 qos_data = &priv->assoc_network->qos_data;
7148 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7149 if (unicast == 0)
7150 qos_data->active = 0;
7151 else
7152 qos_data->active = qos_data->supported;
7154 active = qos_data->active;
7155 supported = qos_data->supported;
7156 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7157 "unicast %d\n",
7158 priv->qos_data.qos_enable, active, supported, unicast);
7159 if (active && priv->qos_data.qos_enable)
7160 return 1;
7162 return 0;
7166 * add QoS parameter to the TX command
7168 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7169 u16 priority,
7170 struct tfd_data *tfd)
7172 int tx_queue_id = 0;
7175 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7176 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7178 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7179 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7180 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7182 return 0;
7186 * background support to run QoS activate functionality
7188 static void ipw_bg_qos_activate(struct work_struct *work)
7190 struct ipw_priv *priv =
7191 container_of(work, struct ipw_priv, qos_activate);
7193 if (priv == NULL)
7194 return;
7196 mutex_lock(&priv->mutex);
7198 if (priv->status & STATUS_ASSOCIATED)
7199 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7201 mutex_unlock(&priv->mutex);
7204 static int ipw_handle_probe_response(struct net_device *dev,
7205 struct ieee80211_probe_response *resp,
7206 struct ieee80211_network *network)
7208 struct ipw_priv *priv = ieee80211_priv(dev);
7209 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7210 (network == priv->assoc_network));
7212 ipw_qos_handle_probe_response(priv, active_network, network);
7214 return 0;
7217 static int ipw_handle_beacon(struct net_device *dev,
7218 struct ieee80211_beacon *resp,
7219 struct ieee80211_network *network)
7221 struct ipw_priv *priv = ieee80211_priv(dev);
7222 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7223 (network == priv->assoc_network));
7225 ipw_qos_handle_probe_response(priv, active_network, network);
7227 return 0;
7230 static int ipw_handle_assoc_response(struct net_device *dev,
7231 struct ieee80211_assoc_response *resp,
7232 struct ieee80211_network *network)
7234 struct ipw_priv *priv = ieee80211_priv(dev);
7235 ipw_qos_association_resp(priv, network);
7236 return 0;
7239 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7240 *qos_param)
7242 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7243 sizeof(*qos_param) * 3, qos_param);
7246 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7247 *qos_param)
7249 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7250 qos_param);
7253 #endif /* CONFIG_IPW2200_QOS */
7255 static int ipw_associate_network(struct ipw_priv *priv,
7256 struct ieee80211_network *network,
7257 struct ipw_supported_rates *rates, int roaming)
7259 int err;
7260 DECLARE_MAC_BUF(mac);
7262 if (priv->config & CFG_FIXED_RATE)
7263 ipw_set_fixed_rate(priv, network->mode);
7265 if (!(priv->config & CFG_STATIC_ESSID)) {
7266 priv->essid_len = min(network->ssid_len,
7267 (u8) IW_ESSID_MAX_SIZE);
7268 memcpy(priv->essid, network->ssid, priv->essid_len);
7271 network->last_associate = jiffies;
7273 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7274 priv->assoc_request.channel = network->channel;
7275 priv->assoc_request.auth_key = 0;
7277 if ((priv->capability & CAP_PRIVACY_ON) &&
7278 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7279 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7280 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7282 if (priv->ieee->sec.level == SEC_LEVEL_1)
7283 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7285 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7286 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7287 priv->assoc_request.auth_type = AUTH_LEAP;
7288 else
7289 priv->assoc_request.auth_type = AUTH_OPEN;
7291 if (priv->ieee->wpa_ie_len) {
7292 priv->assoc_request.policy_support = 0x02; /* RSN active */
7293 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7294 priv->ieee->wpa_ie_len);
7298 * It is valid for our ieee device to support multiple modes, but
7299 * when it comes to associating to a given network we have to choose
7300 * just one mode.
7302 if (network->mode & priv->ieee->mode & IEEE_A)
7303 priv->assoc_request.ieee_mode = IPW_A_MODE;
7304 else if (network->mode & priv->ieee->mode & IEEE_G)
7305 priv->assoc_request.ieee_mode = IPW_G_MODE;
7306 else if (network->mode & priv->ieee->mode & IEEE_B)
7307 priv->assoc_request.ieee_mode = IPW_B_MODE;
7309 priv->assoc_request.capability = network->capability;
7310 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7311 && !(priv->config & CFG_PREAMBLE_LONG)) {
7312 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7313 } else {
7314 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7316 /* Clear the short preamble if we won't be supporting it */
7317 priv->assoc_request.capability &=
7318 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7321 /* Clear capability bits that aren't used in Ad Hoc */
7322 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7323 priv->assoc_request.capability &=
7324 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7326 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7327 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7328 roaming ? "Rea" : "A",
7329 escape_essid(priv->essid, priv->essid_len),
7330 network->channel,
7331 ipw_modes[priv->assoc_request.ieee_mode],
7332 rates->num_rates,
7333 (priv->assoc_request.preamble_length ==
7334 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7335 network->capability &
7336 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7337 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7338 priv->capability & CAP_PRIVACY_ON ?
7339 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7340 "(open)") : "",
7341 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7342 priv->capability & CAP_PRIVACY_ON ?
7343 '1' + priv->ieee->sec.active_key : '.',
7344 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7346 priv->assoc_request.beacon_interval = network->beacon_interval;
7347 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7348 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7349 priv->assoc_request.assoc_type = HC_IBSS_START;
7350 priv->assoc_request.assoc_tsf_msw = 0;
7351 priv->assoc_request.assoc_tsf_lsw = 0;
7352 } else {
7353 if (unlikely(roaming))
7354 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7355 else
7356 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7357 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7358 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7361 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7363 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7364 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7365 priv->assoc_request.atim_window = network->atim_window;
7366 } else {
7367 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7368 priv->assoc_request.atim_window = 0;
7371 priv->assoc_request.listen_interval = network->listen_interval;
7373 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7374 if (err) {
7375 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7376 return err;
7379 rates->ieee_mode = priv->assoc_request.ieee_mode;
7380 rates->purpose = IPW_RATE_CONNECT;
7381 ipw_send_supported_rates(priv, rates);
7383 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7384 priv->sys_config.dot11g_auto_detection = 1;
7385 else
7386 priv->sys_config.dot11g_auto_detection = 0;
7388 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7389 priv->sys_config.answer_broadcast_ssid_probe = 1;
7390 else
7391 priv->sys_config.answer_broadcast_ssid_probe = 0;
7393 err = ipw_send_system_config(priv);
7394 if (err) {
7395 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7396 return err;
7399 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7400 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7401 if (err) {
7402 IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
7403 return err;
7407 * If preemption is enabled, it is possible for the association
7408 * to complete before we return from ipw_send_associate. Therefore
7409 * we have to be sure to update our private data first.
7411 priv->channel = network->channel;
7412 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7413 priv->status |= STATUS_ASSOCIATING;
7414 priv->status &= ~STATUS_SECURITY_UPDATED;
7416 priv->assoc_network = network;
7418 #ifdef CONFIG_IPW2200_QOS
7419 ipw_qos_association(priv, network);
7420 #endif
7422 err = ipw_send_associate(priv, &priv->assoc_request);
7423 if (err) {
7424 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7425 return err;
7428 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n",
7429 escape_essid(priv->essid, priv->essid_len),
7430 print_mac(mac, priv->bssid));
7432 return 0;
7435 static void ipw_roam(void *data)
7437 struct ipw_priv *priv = data;
7438 struct ieee80211_network *network = NULL;
7439 struct ipw_network_match match = {
7440 .network = priv->assoc_network
7443 /* The roaming process is as follows:
7445 * 1. Missed beacon threshold triggers the roaming process by
7446 * setting the status ROAM bit and requesting a scan.
7447 * 2. When the scan completes, it schedules the ROAM work
7448 * 3. The ROAM work looks at all of the known networks for one that
7449 * is a better network than the currently associated. If none
7450 * found, the ROAM process is over (ROAM bit cleared)
7451 * 4. If a better network is found, a disassociation request is
7452 * sent.
7453 * 5. When the disassociation completes, the roam work is again
7454 * scheduled. The second time through, the driver is no longer
7455 * associated, and the newly selected network is sent an
7456 * association request.
7457 * 6. At this point, the roaming process is complete and the ROAM
7458 * status bit is cleared.
7461 /* If we are no longer associated, and the roaming bit is no longer
7462 * set, then we are not actively roaming, so just return */
7463 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7464 return;
7466 if (priv->status & STATUS_ASSOCIATED) {
7467 /* First pass through ROAM process -- look for a better
7468 * network */
7469 unsigned long flags;
7470 u8 rssi = priv->assoc_network->stats.rssi;
7471 priv->assoc_network->stats.rssi = -128;
7472 spin_lock_irqsave(&priv->ieee->lock, flags);
7473 list_for_each_entry(network, &priv->ieee->network_list, list) {
7474 if (network != priv->assoc_network)
7475 ipw_best_network(priv, &match, network, 1);
7477 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7478 priv->assoc_network->stats.rssi = rssi;
7480 if (match.network == priv->assoc_network) {
7481 IPW_DEBUG_ASSOC("No better APs in this network to "
7482 "roam to.\n");
7483 priv->status &= ~STATUS_ROAMING;
7484 ipw_debug_config(priv);
7485 return;
7488 ipw_send_disassociate(priv, 1);
7489 priv->assoc_network = match.network;
7491 return;
7494 /* Second pass through ROAM process -- request association */
7495 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7496 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7497 priv->status &= ~STATUS_ROAMING;
7500 static void ipw_bg_roam(struct work_struct *work)
7502 struct ipw_priv *priv =
7503 container_of(work, struct ipw_priv, roam);
7504 mutex_lock(&priv->mutex);
7505 ipw_roam(priv);
7506 mutex_unlock(&priv->mutex);
7509 static int ipw_associate(void *data)
7511 struct ipw_priv *priv = data;
7513 struct ieee80211_network *network = NULL;
7514 struct ipw_network_match match = {
7515 .network = NULL
7517 struct ipw_supported_rates *rates;
7518 struct list_head *element;
7519 unsigned long flags;
7521 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7522 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7523 return 0;
7526 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7527 IPW_DEBUG_ASSOC("Not attempting association (already in "
7528 "progress)\n");
7529 return 0;
7532 if (priv->status & STATUS_DISASSOCIATING) {
7533 IPW_DEBUG_ASSOC("Not attempting association (in "
7534 "disassociating)\n ");
7535 queue_work(priv->workqueue, &priv->associate);
7536 return 0;
7539 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7540 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7541 "initialized)\n");
7542 return 0;
7545 if (!(priv->config & CFG_ASSOCIATE) &&
7546 !(priv->config & (CFG_STATIC_ESSID |
7547 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7548 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7549 return 0;
7552 /* Protect our use of the network_list */
7553 spin_lock_irqsave(&priv->ieee->lock, flags);
7554 list_for_each_entry(network, &priv->ieee->network_list, list)
7555 ipw_best_network(priv, &match, network, 0);
7557 network = match.network;
7558 rates = &match.rates;
7560 if (network == NULL &&
7561 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7562 priv->config & CFG_ADHOC_CREATE &&
7563 priv->config & CFG_STATIC_ESSID &&
7564 priv->config & CFG_STATIC_CHANNEL &&
7565 !list_empty(&priv->ieee->network_free_list)) {
7566 element = priv->ieee->network_free_list.next;
7567 network = list_entry(element, struct ieee80211_network, list);
7568 ipw_adhoc_create(priv, network);
7569 rates = &priv->rates;
7570 list_del(element);
7571 list_add_tail(&network->list, &priv->ieee->network_list);
7573 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7575 /* If we reached the end of the list, then we don't have any valid
7576 * matching APs */
7577 if (!network) {
7578 ipw_debug_config(priv);
7580 if (!(priv->status & STATUS_SCANNING)) {
7581 if (!(priv->config & CFG_SPEED_SCAN))
7582 queue_delayed_work(priv->workqueue,
7583 &priv->request_scan,
7584 SCAN_INTERVAL);
7585 else
7586 queue_delayed_work(priv->workqueue,
7587 &priv->request_scan, 0);
7590 return 0;
7593 ipw_associate_network(priv, network, rates, 0);
7595 return 1;
7598 static void ipw_bg_associate(struct work_struct *work)
7600 struct ipw_priv *priv =
7601 container_of(work, struct ipw_priv, associate);
7602 mutex_lock(&priv->mutex);
7603 ipw_associate(priv);
7604 mutex_unlock(&priv->mutex);
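/* Note on ipw_rebuild_decrypted_skb() below: when the firmware has
 * already decrypted a protected frame it leaves the PROTECTED bit and
 * the security header/trailer in place.  This helper clears the bit and
 * strips the CCMP header and MIC (8 + 8 bytes) or the WEP IV and ICV
 * (4 + 4 bytes), so the ieee80211 layer can treat the frame as plaintext. */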
7607 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7608 struct sk_buff *skb)
7610 struct ieee80211_hdr *hdr;
7611 u16 fc;
7613 hdr = (struct ieee80211_hdr *)skb->data;
7614 fc = le16_to_cpu(hdr->frame_ctl);
7615 if (!(fc & IEEE80211_FCTL_PROTECTED))
7616 return;
7618 fc &= ~IEEE80211_FCTL_PROTECTED;
7619 hdr->frame_ctl = cpu_to_le16(fc);
7620 switch (priv->ieee->sec.level) {
7621 case SEC_LEVEL_3:
7622 /* Remove CCMP HDR */
7623 memmove(skb->data + IEEE80211_3ADDR_LEN,
7624 skb->data + IEEE80211_3ADDR_LEN + 8,
7625 skb->len - IEEE80211_3ADDR_LEN - 8);
7626 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7627 break;
7628 case SEC_LEVEL_2:
7629 break;
7630 case SEC_LEVEL_1:
7631 /* Remove IV */
7632 memmove(skb->data + IEEE80211_3ADDR_LEN,
7633 skb->data + IEEE80211_3ADDR_LEN + 4,
7634 skb->len - IEEE80211_3ADDR_LEN - 4);
7635 skb_trim(skb, skb->len - 8); /* IV + ICV */
7636 break;
7637 case SEC_LEVEL_0:
7638 break;
7639 default:
7640 printk(KERN_ERR "Unknown security level %d\n",
7641 priv->ieee->sec.level);
7642 break;
7646 static void ipw_handle_data_packet(struct ipw_priv *priv,
7647 struct ipw_rx_mem_buffer *rxb,
7648 struct ieee80211_rx_stats *stats)
7650 struct ieee80211_hdr_4addr *hdr;
7651 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7653 /* We received data from the HW, so stop the watchdog */
7654 priv->net_dev->trans_start = jiffies;
7656 /* We only process data packets if the
7657 * interface is open */
7658 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7659 skb_tailroom(rxb->skb))) {
7660 priv->ieee->stats.rx_errors++;
7661 priv->wstats.discard.misc++;
7662 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7663 return;
7664 } else if (unlikely(!netif_running(priv->net_dev))) {
7665 priv->ieee->stats.rx_dropped++;
7666 priv->wstats.discard.misc++;
7667 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7668 return;
7671 /* Advance skb->data to the start of the actual payload */
7672 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7674 /* Set the size of the skb to the size of the frame */
7675 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7677 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7679 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7680 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7681 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7682 (is_multicast_ether_addr(hdr->addr1) ?
7683 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7684 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7686 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7687 priv->ieee->stats.rx_errors++;
7688 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7689 rxb->skb = NULL;
7690 __ipw_led_activity_on(priv);
7694 #ifdef CONFIG_IPW2200_RADIOTAP
7695 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7696 struct ipw_rx_mem_buffer *rxb,
7697 struct ieee80211_rx_stats *stats)
7699 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7700 struct ipw_rx_frame *frame = &pkt->u.frame;
7702 /* initial pull of some data */
7703 u16 received_channel = frame->received_channel;
7704 u8 antennaAndPhy = frame->antennaAndPhy;
7705 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7706 u16 pktrate = frame->rate;
7708 /* Magic struct that slots into the radiotap header -- no reason
7709 * to build this manually element by element, we can write it much
7710 * more efficiently than we can parse it. ORDER MATTERS HERE */
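/* Note: the it_present bitmap filled in below must enable exactly the
 * fields laid out in struct ipw_rt_hdr, in radiotap's canonical order
 * (TSFT, flags, rate, channel, dBm antsignal, dBm antnoise, antenna),
 * since radiotap consumers locate each field by walking that bitmap. */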
7711 struct ipw_rt_hdr *ipw_rt;
7713 short len = le16_to_cpu(pkt->u.frame.length);
7715 /* We received data from the HW, so stop the watchdog */
7716 priv->net_dev->trans_start = jiffies;
7718 /* We only process data packets if the
7719 * interface is open */
7720 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7721 skb_tailroom(rxb->skb))) {
7722 priv->ieee->stats.rx_errors++;
7723 priv->wstats.discard.misc++;
7724 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7725 return;
7726 } else if (unlikely(!netif_running(priv->net_dev))) {
7727 priv->ieee->stats.rx_dropped++;
7728 priv->wstats.discard.misc++;
7729 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7730 return;
7733 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7734 * that now */
7735 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7736 /* FIXME: Should alloc bigger skb instead */
7737 priv->ieee->stats.rx_dropped++;
7738 priv->wstats.discard.misc++;
7739 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7740 return;
7743 /* copy the frame itself */
7744 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7745 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7747 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7748 * part of our real header, saves a little time.
7750 * No longer necessary since we fill in all our data. Purge before merging
7751 * patch officially.
7752 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7753 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7756 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7758 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7759 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7760 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7762 /* Big bitfield of all the fields we provide in radiotap */
7763 ipw_rt->rt_hdr.it_present =
7764 ((1 << IEEE80211_RADIOTAP_TSFT) |
7765 (1 << IEEE80211_RADIOTAP_FLAGS) |
7766 (1 << IEEE80211_RADIOTAP_RATE) |
7767 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7768 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7769 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7770 (1 << IEEE80211_RADIOTAP_ANTENNA));
7772 /* Zero the flags, we'll add to them as we go */
7773 ipw_rt->rt_flags = 0;
7774 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7775 frame->parent_tsf[2] << 16 |
7776 frame->parent_tsf[1] << 8 |
7777 frame->parent_tsf[0]);
7779 /* Convert signal to DBM */
7780 ipw_rt->rt_dbmsignal = antsignal;
7781 ipw_rt->rt_dbmnoise = frame->noise;
7783 /* Convert the channel data and set the flags */
7784 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7785 if (received_channel > 14) { /* 802.11a */
7786 ipw_rt->rt_chbitmask =
7787 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7788 } else if (antennaAndPhy & 32) { /* 802.11b */
7789 ipw_rt->rt_chbitmask =
7790 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7791 } else { /* 802.11g */
7792 ipw_rt->rt_chbitmask =
7793 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7796 /* set the radiotap rate field in 500 kb/s units (e.g. 11 Mb/s -> 22) */
7797 switch (pktrate) {
7798 case IPW_TX_RATE_1MB:
7799 ipw_rt->rt_rate = 2;
7800 break;
7801 case IPW_TX_RATE_2MB:
7802 ipw_rt->rt_rate = 4;
7803 break;
7804 case IPW_TX_RATE_5MB:
7805 ipw_rt->rt_rate = 10;
7806 break;
7807 case IPW_TX_RATE_6MB:
7808 ipw_rt->rt_rate = 12;
7809 break;
7810 case IPW_TX_RATE_9MB:
7811 ipw_rt->rt_rate = 18;
7812 break;
7813 case IPW_TX_RATE_11MB:
7814 ipw_rt->rt_rate = 22;
7815 break;
7816 case IPW_TX_RATE_12MB:
7817 ipw_rt->rt_rate = 24;
7818 break;
7819 case IPW_TX_RATE_18MB:
7820 ipw_rt->rt_rate = 36;
7821 break;
7822 case IPW_TX_RATE_24MB:
7823 ipw_rt->rt_rate = 48;
7824 break;
7825 case IPW_TX_RATE_36MB:
7826 ipw_rt->rt_rate = 72;
7827 break;
7828 case IPW_TX_RATE_48MB:
7829 ipw_rt->rt_rate = 96;
7830 break;
7831 case IPW_TX_RATE_54MB:
7832 ipw_rt->rt_rate = 108;
7833 break;
7834 default:
7835 ipw_rt->rt_rate = 0;
7836 break;
7839 /* antenna number */
7840 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7842 /* set the preamble flag if we have it */
7843 if ((antennaAndPhy & 64))
7844 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7846 /* Set the size of the skb to the size of the frame */
7847 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7849 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7851 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7852 priv->ieee->stats.rx_errors++;
7853 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7854 rxb->skb = NULL;
7855 /* no LED during capture */
7858 #endif
7860 #ifdef CONFIG_IPW2200_PROMISCUOUS
7861 #define ieee80211_is_probe_response(fc) \
7862 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7863 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7865 #define ieee80211_is_management(fc) \
7866 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7868 #define ieee80211_is_control(fc) \
7869 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7871 #define ieee80211_is_data(fc) \
7872 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7874 #define ieee80211_is_assoc_request(fc) \
7875 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7877 #define ieee80211_is_reassoc_request(fc) \
7878 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
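/* Note on ipw_handle_promiscuous_rx() below: priv->prom_priv->filter
 * selects what is delivered to the rtap interface.  IPW_PROM_NO_RX drops
 * all received frames, IPW_PROM_NO_MGMT/_CTL/_DATA drop the matching
 * frame class, and the *_HEADER_ONLY variants truncate the copied frame
 * to just the 802.11 header before the radiotap header is prepended. */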
7880 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7881 struct ipw_rx_mem_buffer *rxb,
7882 struct ieee80211_rx_stats *stats)
7884 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7885 struct ipw_rx_frame *frame = &pkt->u.frame;
7886 struct ipw_rt_hdr *ipw_rt;
7888 /* First cache any information we need before we overwrite
7889 * the information provided in the skb from the hardware */
7890 struct ieee80211_hdr *hdr;
7891 u16 channel = frame->received_channel;
7892 u8 phy_flags = frame->antennaAndPhy;
7893 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7894 s8 noise = frame->noise;
7895 u8 rate = frame->rate;
7896 short len = le16_to_cpu(pkt->u.frame.length);
7897 struct sk_buff *skb;
7898 int hdr_only = 0;
7899 u16 filter = priv->prom_priv->filter;
7901 /* If the filter is set to not include Rx frames then return */
7902 if (filter & IPW_PROM_NO_RX)
7903 return;
7905 /* We received data from the HW, so stop the watchdog */
7906 priv->prom_net_dev->trans_start = jiffies;
7908 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7909 priv->prom_priv->ieee->stats.rx_errors++;
7910 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7911 return;
7914 /* We only process data packets if the interface is open */
7915 if (unlikely(!netif_running(priv->prom_net_dev))) {
7916 priv->prom_priv->ieee->stats.rx_dropped++;
7917 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7918 return;
7921 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7922 * that now */
7923 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7924 /* FIXME: Should alloc bigger skb instead */
7925 priv->prom_priv->ieee->stats.rx_dropped++;
7926 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7927 return;
7930 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7931 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7932 if (filter & IPW_PROM_NO_MGMT)
7933 return;
7934 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7935 hdr_only = 1;
7936 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7937 if (filter & IPW_PROM_NO_CTL)
7938 return;
7939 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7940 hdr_only = 1;
7941 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7942 if (filter & IPW_PROM_NO_DATA)
7943 return;
7944 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7945 hdr_only = 1;
7948 /* Copy the SKB since this is for the promiscuous side */
7949 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7950 if (skb == NULL) {
7951 IPW_ERROR("skb_copy failed for promiscuous copy.\n");

7952 return;
7955 /* copy the frame data to write after where the radiotap header goes */
7956 ipw_rt = (void *)skb->data;
7958 if (hdr_only)
7959 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7961 memcpy(ipw_rt->payload, hdr, len);
7963 /* Zero the radiotap static buffer ... We only need to zero the bytes
7964 * NOT part of our real header, saves a little time.
7966 * No longer necessary since we fill in all our data. Purge before
7967 * merging patch officially.
7968 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7969 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7972 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7973 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7974 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7976 /* Set the size of the skb to the size of the frame */
7977 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7979 /* Big bitfield of all the fields we provide in radiotap */
7980 ipw_rt->rt_hdr.it_present =
7981 ((1 << IEEE80211_RADIOTAP_TSFT) |
7982 (1 << IEEE80211_RADIOTAP_FLAGS) |
7983 (1 << IEEE80211_RADIOTAP_RATE) |
7984 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7985 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7986 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7987 (1 << IEEE80211_RADIOTAP_ANTENNA));
7989 /* Zero the flags, we'll add to them as we go */
7990 ipw_rt->rt_flags = 0;
7991 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7992 frame->parent_tsf[2] << 16 |
7993 frame->parent_tsf[1] << 8 |
7994 frame->parent_tsf[0]);
7996 /* Convert to DBM */
7997 ipw_rt->rt_dbmsignal = signal;
7998 ipw_rt->rt_dbmnoise = noise;
8000 /* Convert the channel data and set the flags */
8001 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8002 if (channel > 14) { /* 802.11a */
8003 ipw_rt->rt_chbitmask =
8004 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8005 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8006 ipw_rt->rt_chbitmask =
8007 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8008 } else { /* 802.11g */
8009 ipw_rt->rt_chbitmask =
8010 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8013 /* set the rate in multiples of 500k/s */
8014 switch (rate) {
8015 case IPW_TX_RATE_1MB:
8016 ipw_rt->rt_rate = 2;
8017 break;
8018 case IPW_TX_RATE_2MB:
8019 ipw_rt->rt_rate = 4;
8020 break;
8021 case IPW_TX_RATE_5MB:
8022 ipw_rt->rt_rate = 10;
8023 break;
8024 case IPW_TX_RATE_6MB:
8025 ipw_rt->rt_rate = 12;
8026 break;
8027 case IPW_TX_RATE_9MB:
8028 ipw_rt->rt_rate = 18;
8029 break;
8030 case IPW_TX_RATE_11MB:
8031 ipw_rt->rt_rate = 22;
8032 break;
8033 case IPW_TX_RATE_12MB:
8034 ipw_rt->rt_rate = 24;
8035 break;
8036 case IPW_TX_RATE_18MB:
8037 ipw_rt->rt_rate = 36;
8038 break;
8039 case IPW_TX_RATE_24MB:
8040 ipw_rt->rt_rate = 48;
8041 break;
8042 case IPW_TX_RATE_36MB:
8043 ipw_rt->rt_rate = 72;
8044 break;
8045 case IPW_TX_RATE_48MB:
8046 ipw_rt->rt_rate = 96;
8047 break;
8048 case IPW_TX_RATE_54MB:
8049 ipw_rt->rt_rate = 108;
8050 break;
8051 default:
8052 ipw_rt->rt_rate = 0;
8053 break;
8056 /* antenna number */
8057 ipw_rt->rt_antenna = (phy_flags & 3);
8059 /* set the preamble flag if we have it */
8060 if (phy_flags & (1 << 6))
8061 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8063 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8065 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8066 priv->prom_priv->ieee->stats.rx_errors++;
8067 dev_kfree_skb_any(skb);
8070 #endif
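/* Note on the promiscuous filter: prom_priv->filter is a bitmask of the
 * IPW_PROM_* flags tested above.  For example, a capture set up with
 *
 *	filter = IPW_PROM_NO_CTL | IPW_PROM_DATA_HEADER_ONLY;
 *
 * still passes management frames in full, drops control frames early and
 * copies data frames truncated to ieee80211_get_hdrlen(frame_ctl) bytes
 * before the radiotap header is prepended. */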
8072 static int is_network_packet(struct ipw_priv *priv,
8073 struct ieee80211_hdr_4addr *header)
8075 /* Filter incoming packets to determine if they are targeted toward
8076 * this network, discarding packets coming from ourselves */
8077 switch (priv->ieee->iw_mode) {
8078 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8079 /* packets from our adapter are dropped (echo) */
8080 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8081 return 0;
8083 /* {broad,multi}cast packets to our BSSID go through */
8084 if (is_multicast_ether_addr(header->addr1))
8085 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8087 /* packets to our adapter go through */
8088 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8089 ETH_ALEN);
8091 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8092 /* packets from our adapter are dropped (echo) */
8093 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8094 return 0;
8096 /* {broad,multi}cast packets to our BSS go through */
8097 if (is_multicast_ether_addr(header->addr1))
8098 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8100 /* packets to our adapter go through */
8101 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8102 ETH_ALEN);
8105 return 1;
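/* Note on the address checks above: for frames received by a station the
 * 802.11 three-address layout is used, so in IBSS mode addr1/addr2/addr3
 * are destination/source/BSSID (a broadcast beacon from our own cell has
 * addr1 == ff:ff:ff:ff:ff:ff and addr3 == priv->bssid), while in
 * infrastructure mode the AP transmits destination/BSSID/source, so an
 * echo of our own frame comes back with addr3 equal to our MAC address
 * and is discarded. */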
8108 #define IPW_PACKET_RETRY_TIME HZ
8110 static int is_duplicate_packet(struct ipw_priv *priv,
8111 struct ieee80211_hdr_4addr *header)
8113 u16 sc = le16_to_cpu(header->seq_ctl);
8114 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8115 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8116 u16 *last_seq, *last_frag;
8117 unsigned long *last_time;
8119 switch (priv->ieee->iw_mode) {
8120 case IW_MODE_ADHOC:
8122 struct list_head *p;
8123 struct ipw_ibss_seq *entry = NULL;
8124 u8 *mac = header->addr2;
8125 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8127 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8128 entry =
8129 list_entry(p, struct ipw_ibss_seq, list);
8130 if (!memcmp(entry->mac, mac, ETH_ALEN))
8131 break;
8133 if (p == &priv->ibss_mac_hash[index]) {
8134 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8135 if (!entry) {
8136 IPW_ERROR
8137 ("Cannot malloc new mac entry\n");
8138 return 0;
8140 memcpy(entry->mac, mac, ETH_ALEN);
8141 entry->seq_num = seq;
8142 entry->frag_num = frag;
8143 entry->packet_time = jiffies;
8144 list_add(&entry->list,
8145 &priv->ibss_mac_hash[index]);
8146 return 0;
8148 last_seq = &entry->seq_num;
8149 last_frag = &entry->frag_num;
8150 last_time = &entry->packet_time;
8151 break;
8153 case IW_MODE_INFRA:
8154 last_seq = &priv->last_seq_num;
8155 last_frag = &priv->last_frag_num;
8156 last_time = &priv->last_packet_time;
8157 break;
8158 default:
8159 return 0;
8161 if ((*last_seq == seq) &&
8162 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8163 if (*last_frag == frag)
8164 goto drop;
8165 if (*last_frag + 1 != frag)
8166 /* out-of-order fragment */
8167 goto drop;
8168 } else
8169 *last_seq = seq;
8171 *last_frag = frag;
8172 *last_time = jiffies;
8173 return 0;
8175 drop:
8176 /* This BUG_ON() is commented out because we have observed the card
8177 * receiving duplicate packets without the FCTL_RETRY bit set, in
8178 * IBSS mode with fragmentation enabled.
8179 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8180 return 1;
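/* Note on the duplicate detection above: the sequence control field packs
 * the fragment number in its low 4 bits and the sequence number in the
 * upper 12 bits (assuming the usual ieee80211.h helpers), e.g.
 *
 *	u16 sc   = le16_to_cpu(header->seq_ctl);
 *	u16 frag = WLAN_GET_SEQ_FRAG(sc);	// sc & 0x000f
 *	u16 seq  = WLAN_GET_SEQ_SEQ(sc);	// (sc & 0xfff0) >> 4
 *
 * A retransmission therefore shows up as the same (seq, frag) pair from
 * the same sender within IPW_PACKET_RETRY_TIME (one second), and in IBSS
 * mode the per-peer state is kept in a hash indexed by the last octet of
 * the sender's MAC modulo IPW_IBSS_MAC_HASH_SIZE. */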
8183 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8184 struct ipw_rx_mem_buffer *rxb,
8185 struct ieee80211_rx_stats *stats)
8187 struct sk_buff *skb = rxb->skb;
8188 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8189 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8190 (skb->data + IPW_RX_FRAME_SIZE);
8192 ieee80211_rx_mgt(priv->ieee, header, stats);
8194 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8195 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8196 IEEE80211_STYPE_PROBE_RESP) ||
8197 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8198 IEEE80211_STYPE_BEACON))) {
8199 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8200 ipw_add_station(priv, header->addr2);
8203 if (priv->config & CFG_NET_STATS) {
8204 IPW_DEBUG_HC("sending stat packet\n");
8206 /* Set the size of the skb to the size of the full
8207 * ipw header and 802.11 frame */
8208 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8209 IPW_RX_FRAME_SIZE);
8211 /* Advance past the ipw packet header to the 802.11 frame */
8212 skb_pull(skb, IPW_RX_FRAME_SIZE);
8214 /* Push the ieee80211_rx_stats before the 802.11 frame */
8215 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8217 skb->dev = priv->ieee->dev;
8219 /* Point raw at the ieee80211_stats */
8220 skb_reset_mac_header(skb);
8222 skb->pkt_type = PACKET_OTHERHOST;
8223 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8224 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8225 netif_rx(skb);
8226 rxb->skb = NULL;
8231 * Main entry function for receiving a packet with 802.11 headers. This
8232 * should be called whenever the FW has notified us that there is a new
8233 * skb in the receive queue.
8235 static void ipw_rx(struct ipw_priv *priv)
8237 struct ipw_rx_mem_buffer *rxb;
8238 struct ipw_rx_packet *pkt;
8239 struct ieee80211_hdr_4addr *header;
8240 u32 r, w, i;
8241 u8 network_packet;
8242 DECLARE_MAC_BUF(mac);
8243 DECLARE_MAC_BUF(mac2);
8244 DECLARE_MAC_BUF(mac3);
8246 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8247 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8248 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8250 while (i != r) {
8251 rxb = priv->rxq->queue[i];
8252 if (unlikely(rxb == NULL)) {
8253 printk(KERN_CRIT "Queue not allocated!\n");
8254 break;
8256 priv->rxq->queue[i] = NULL;
8258 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8259 IPW_RX_BUF_SIZE,
8260 PCI_DMA_FROMDEVICE);
8262 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8263 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8264 pkt->header.message_type,
8265 pkt->header.rx_seq_num, pkt->header.control_bits);
8267 switch (pkt->header.message_type) {
8268 case RX_FRAME_TYPE: /* 802.11 frame */ {
8269 struct ieee80211_rx_stats stats = {
8270 .rssi = pkt->u.frame.rssi_dbm -
8271 IPW_RSSI_TO_DBM,
8272 .signal =
8273 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8274 IPW_RSSI_TO_DBM + 0x100,
8275 .noise =
8276 le16_to_cpu(pkt->u.frame.noise),
8277 .rate = pkt->u.frame.rate,
8278 .mac_time = jiffies,
8279 .received_channel =
8280 pkt->u.frame.received_channel,
8281 .freq =
8282 (pkt->u.frame.
8283 control & (1 << 0)) ?
8284 IEEE80211_24GHZ_BAND :
8285 IEEE80211_52GHZ_BAND,
8286 .len = le16_to_cpu(pkt->u.frame.length),
8289 if (stats.rssi != 0)
8290 stats.mask |= IEEE80211_STATMASK_RSSI;
8291 if (stats.signal != 0)
8292 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8293 if (stats.noise != 0)
8294 stats.mask |= IEEE80211_STATMASK_NOISE;
8295 if (stats.rate != 0)
8296 stats.mask |= IEEE80211_STATMASK_RATE;
8298 priv->rx_packets++;
8300 #ifdef CONFIG_IPW2200_PROMISCUOUS
8301 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8302 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8303 #endif
8305 #ifdef CONFIG_IPW2200_MONITOR
8306 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8307 #ifdef CONFIG_IPW2200_RADIOTAP
8309 ipw_handle_data_packet_monitor(priv,
8310 rxb,
8311 &stats);
8312 #else
8313 ipw_handle_data_packet(priv, rxb,
8314 &stats);
8315 #endif
8316 break;
8318 #endif
8320 header =
8321 (struct ieee80211_hdr_4addr *)(rxb->skb->
8322 data +
8323 IPW_RX_FRAME_SIZE);
8324 /* TODO: Check Ad-Hoc dest/source and make sure
8325 * that we are actually parsing these packets
8326 * correctly -- we should probably use the
8327 * frame control of the packet and disregard
8328 * the current iw_mode */
8330 network_packet =
8331 is_network_packet(priv, header);
8332 if (network_packet && priv->assoc_network) {
8333 priv->assoc_network->stats.rssi =
8334 stats.rssi;
8335 priv->exp_avg_rssi =
8336 exponential_average(priv->exp_avg_rssi,
8337 stats.rssi, DEPTH_RSSI);
8340 IPW_DEBUG_RX("Frame: len=%u\n",
8341 le16_to_cpu(pkt->u.frame.length));
8343 if (le16_to_cpu(pkt->u.frame.length) <
8344 ieee80211_get_hdrlen(le16_to_cpu(
8345 header->frame_ctl))) {
8346 IPW_DEBUG_DROP
8347 ("Received packet is too small. "
8348 "Dropping.\n");
8349 priv->ieee->stats.rx_errors++;
8350 priv->wstats.discard.misc++;
8351 break;
8354 switch (WLAN_FC_GET_TYPE
8355 (le16_to_cpu(header->frame_ctl))) {
8357 case IEEE80211_FTYPE_MGMT:
8358 ipw_handle_mgmt_packet(priv, rxb,
8359 &stats);
8360 break;
8362 case IEEE80211_FTYPE_CTL:
8363 break;
8365 case IEEE80211_FTYPE_DATA:
8366 if (unlikely(!network_packet ||
8367 is_duplicate_packet(priv,
8368 header)))
8370 IPW_DEBUG_DROP("Dropping: "
8371 "%s, "
8372 "%s, "
8373 "%s\n",
8374 print_mac(mac,
8375 header->
8376 addr1),
8377 print_mac(mac2,
8378 header->
8379 addr2),
8380 print_mac(mac3,
8381 header->
8382 addr3));
8383 break;
8386 ipw_handle_data_packet(priv, rxb,
8387 &stats);
8389 break;
8391 break;
8394 case RX_HOST_NOTIFICATION_TYPE:{
8395 IPW_DEBUG_RX
8396 ("Notification: subtype=%02X flags=%02X size=%d\n",
8397 pkt->u.notification.subtype,
8398 pkt->u.notification.flags,
8399 le16_to_cpu(pkt->u.notification.size));
8400 ipw_rx_notification(priv, &pkt->u.notification);
8401 break;
8404 default:
8405 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8406 pkt->header.message_type);
8407 break;
8410 /* For now we just don't re-use anything. We can tweak this
8411 * later to try and re-use notification packets and SKBs that
8412 * fail to Rx correctly */
8413 if (rxb->skb != NULL) {
8414 dev_kfree_skb_any(rxb->skb);
8415 rxb->skb = NULL;
8418 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8419 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8420 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8422 i = (i + 1) % RX_QUEUE_SIZE;
8425 /* Backtrack one entry */
8426 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8428 ipw_rx_queue_restock(priv);
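/* Note on the Rx ring handling above: the loop walks the circular queue
 * from the slot after the last one processed up to the hardware read
 * index, roughly
 *
 *	r = ipw_read32(priv, IPW_RX_READ_INDEX);
 *	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
 *	while (i != r) { handle queue[i]; i = (i + 1) % RX_QUEUE_SIZE; }
 *	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
 *
 * The final "backtrack one entry" leaves 'processed' pointing at the last
 * slot actually handled, and ipw_rx_queue_restock() then replenishes the
 * buffers that were unmapped and moved to the rx_used list. */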
8431 #define DEFAULT_RTS_THRESHOLD 2304U
8432 #define MIN_RTS_THRESHOLD 1U
8433 #define MAX_RTS_THRESHOLD 2304U
8434 #define DEFAULT_BEACON_INTERVAL 100U
8435 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8436 #define DEFAULT_LONG_RETRY_LIMIT 4U
8439 * ipw_sw_reset
8440 * @option: options to control different reset behaviour
8441 * 0 = reset everything except the 'disable' module_param
8442 * 1 = reset everything and print out driver info (for probe only)
8443 * 2 = reset everything
8445 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8447 int band, modulation;
8448 int old_mode = priv->ieee->iw_mode;
8450 /* Initialize module parameter values here */
8451 priv->config = 0;
8453 /* We default to disabling the LED code as right now it causes
8454 * too many systems to lock up... */
8455 if (!led)
8456 priv->config |= CFG_NO_LED;
8458 if (associate)
8459 priv->config |= CFG_ASSOCIATE;
8460 else
8461 IPW_DEBUG_INFO("Auto associate disabled.\n");
8463 if (auto_create)
8464 priv->config |= CFG_ADHOC_CREATE;
8465 else
8466 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8468 priv->config &= ~CFG_STATIC_ESSID;
8469 priv->essid_len = 0;
8470 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8472 if (disable && option) {
8473 priv->status |= STATUS_RF_KILL_SW;
8474 IPW_DEBUG_INFO("Radio disabled.\n");
8477 if (channel != 0) {
8478 priv->config |= CFG_STATIC_CHANNEL;
8479 priv->channel = channel;
8480 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8481 /* TODO: Validate that provided channel is in range */
8483 #ifdef CONFIG_IPW2200_QOS
8484 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8485 burst_duration_CCK, burst_duration_OFDM);
8486 #endif /* CONFIG_IPW2200_QOS */
8488 switch (mode) {
8489 case 1:
8490 priv->ieee->iw_mode = IW_MODE_ADHOC;
8491 priv->net_dev->type = ARPHRD_ETHER;
8493 break;
8494 #ifdef CONFIG_IPW2200_MONITOR
8495 case 2:
8496 priv->ieee->iw_mode = IW_MODE_MONITOR;
8497 #ifdef CONFIG_IPW2200_RADIOTAP
8498 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8499 #else
8500 priv->net_dev->type = ARPHRD_IEEE80211;
8501 #endif
8502 break;
8503 #endif
8504 default:
8505 case 0:
8506 priv->net_dev->type = ARPHRD_ETHER;
8507 priv->ieee->iw_mode = IW_MODE_INFRA;
8508 break;
8511 if (hwcrypto) {
8512 priv->ieee->host_encrypt = 0;
8513 priv->ieee->host_encrypt_msdu = 0;
8514 priv->ieee->host_decrypt = 0;
8515 priv->ieee->host_mc_decrypt = 0;
8517 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8519 /* IPW2200/2915 is able to do hardware fragmentation. */
8520 priv->ieee->host_open_frag = 0;
8522 if ((priv->pci_dev->device == 0x4223) ||
8523 (priv->pci_dev->device == 0x4224)) {
8524 if (option == 1)
8525 printk(KERN_INFO DRV_NAME
8526 ": Detected Intel PRO/Wireless 2915ABG Network "
8527 "Connection\n");
8528 priv->ieee->abg_true = 1;
8529 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8530 modulation = IEEE80211_OFDM_MODULATION |
8531 IEEE80211_CCK_MODULATION;
8532 priv->adapter = IPW_2915ABG;
8533 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8534 } else {
8535 if (option == 1)
8536 printk(KERN_INFO DRV_NAME
8537 ": Detected Intel PRO/Wireless 2200BG Network "
8538 "Connection\n");
8540 priv->ieee->abg_true = 0;
8541 band = IEEE80211_24GHZ_BAND;
8542 modulation = IEEE80211_OFDM_MODULATION |
8543 IEEE80211_CCK_MODULATION;
8544 priv->adapter = IPW_2200BG;
8545 priv->ieee->mode = IEEE_G | IEEE_B;
8548 priv->ieee->freq_band = band;
8549 priv->ieee->modulation = modulation;
8551 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8553 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8554 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8556 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8557 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8558 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8560 /* If power management is turned on, default to AC mode */
8561 priv->power_mode = IPW_POWER_AC;
8562 priv->tx_power = IPW_TX_POWER_DEFAULT;
8564 return old_mode == priv->ieee->iw_mode;
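/* Note on ipw_sw_reset(): the 'mode' module parameter selects the
 * operating mode (0 = managed/IW_MODE_INFRA, 1 = ad-hoc/IW_MODE_ADHOC,
 * 2 = monitor when CONFIG_IPW2200_MONITOR is built in, with the link type
 * switched to ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP), and the
 * adapter type is derived from the PCI device ID: 0x4223/0x4224 are
 * reported as a 2915ABG (a/b/g, both bands), anything else as a 2200BG
 * (b/g, 2.4 GHz only).  The return value tells the caller whether the
 * iw_mode survived the reset unchanged. */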
8568 * This file defines the Wireless Extension handlers. It does not
8569 * define any methods of hardware manipulation and relies on the
8570 * functions defined in ipw_main to provide the HW interaction.
8572 * The exception to this is the use of the ipw_get_ordinal()
8573 * function, which polls the hardware rather than making unnecessary calls.
8577 static int ipw_wx_get_name(struct net_device *dev,
8578 struct iw_request_info *info,
8579 union iwreq_data *wrqu, char *extra)
8581 struct ipw_priv *priv = ieee80211_priv(dev);
8582 mutex_lock(&priv->mutex);
8583 if (priv->status & STATUS_RF_KILL_MASK)
8584 strcpy(wrqu->name, "radio off");
8585 else if (!(priv->status & STATUS_ASSOCIATED))
8586 strcpy(wrqu->name, "unassociated");
8587 else
8588 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8589 ipw_modes[priv->assoc_request.ieee_mode]);
8590 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8591 mutex_unlock(&priv->mutex);
8592 return 0;
8595 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8597 if (channel == 0) {
8598 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8599 priv->config &= ~CFG_STATIC_CHANNEL;
8600 IPW_DEBUG_ASSOC("Attempting to associate with new "
8601 "parameters.\n");
8602 ipw_associate(priv);
8603 return 0;
8606 priv->config |= CFG_STATIC_CHANNEL;
8608 if (priv->channel == channel) {
8609 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8610 channel);
8611 return 0;
8614 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8615 priv->channel = channel;
8617 #ifdef CONFIG_IPW2200_MONITOR
8618 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8619 int i;
8620 if (priv->status & STATUS_SCANNING) {
8621 IPW_DEBUG_SCAN("Scan abort triggered due to "
8622 "channel change.\n");
8623 ipw_abort_scan(priv);
8626 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8627 udelay(10);
8629 if (priv->status & STATUS_SCANNING)
8630 IPW_DEBUG_SCAN("Still scanning...\n");
8631 else
8632 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8633 1000 - i);
8635 return 0;
8637 #endif /* CONFIG_IPW2200_MONITOR */
8639 /* Network configuration changed -- force [re]association */
8640 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8641 if (!ipw_disassociate(priv))
8642 ipw_associate(priv);
8644 return 0;
8647 static int ipw_wx_set_freq(struct net_device *dev,
8648 struct iw_request_info *info,
8649 union iwreq_data *wrqu, char *extra)
8651 struct ipw_priv *priv = ieee80211_priv(dev);
8652 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8653 struct iw_freq *fwrq = &wrqu->freq;
8654 int ret = 0, i;
8655 u8 channel, flags;
8656 int band;
8658 if (fwrq->m == 0) {
8659 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8660 mutex_lock(&priv->mutex);
8661 ret = ipw_set_channel(priv, 0);
8662 mutex_unlock(&priv->mutex);
8663 return ret;
8665 /* if setting by freq convert to channel */
8666 if (fwrq->e == 1) {
8667 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8668 if (channel == 0)
8669 return -EINVAL;
8670 } else
8671 channel = fwrq->m;
8673 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8674 return -EINVAL;
8676 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8677 i = ieee80211_channel_to_index(priv->ieee, channel);
8678 if (i == -1)
8679 return -EINVAL;
8681 flags = (band == IEEE80211_24GHZ_BAND) ?
8682 geo->bg[i].flags : geo->a[i].flags;
8683 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8684 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8685 return -EINVAL;
8689 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8690 mutex_lock(&priv->mutex);
8691 ret = ipw_set_channel(priv, channel);
8692 mutex_unlock(&priv->mutex);
8693 return ret;
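/* Note on SIOCSIWFREQ handling above: struct iw_freq encodes a value as
 * m * 10^e, so a plain channel number arrives with e == 0 while a
 * frequency arrives with a positive exponent and is converted back to a
 * channel via ieee80211_freq_to_channel().  For example (using the same
 * encoding ipw_wx_get_freq() produces below), channel 6 at 2437 MHz is
 *
 *	wrqu->freq.m = 243700000;	// 243700000 * 10^1 Hz = 2.437 GHz
 *	wrqu->freq.e = 1;
 *
 * whereas { .m = 6, .e = 0 } selects channel 6 directly. */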
8696 static int ipw_wx_get_freq(struct net_device *dev,
8697 struct iw_request_info *info,
8698 union iwreq_data *wrqu, char *extra)
8700 struct ipw_priv *priv = ieee80211_priv(dev);
8702 wrqu->freq.e = 0;
8704 /* If we are associated, trying to associate, or have a statically
8705 * configured CHANNEL then return that; otherwise return ANY */
8706 mutex_lock(&priv->mutex);
8707 if (priv->config & CFG_STATIC_CHANNEL ||
8708 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8709 int i;
8711 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8712 BUG_ON(i == -1);
8713 wrqu->freq.e = 1;
8715 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8716 case IEEE80211_52GHZ_BAND:
8717 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8718 break;
8720 case IEEE80211_24GHZ_BAND:
8721 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8722 break;
8724 default:
8725 BUG();
8727 } else
8728 wrqu->freq.m = 0;
8730 mutex_unlock(&priv->mutex);
8731 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8732 return 0;
8735 static int ipw_wx_set_mode(struct net_device *dev,
8736 struct iw_request_info *info,
8737 union iwreq_data *wrqu, char *extra)
8739 struct ipw_priv *priv = ieee80211_priv(dev);
8740 int err = 0;
8742 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8744 switch (wrqu->mode) {
8745 #ifdef CONFIG_IPW2200_MONITOR
8746 case IW_MODE_MONITOR:
8747 #endif
8748 case IW_MODE_ADHOC:
8749 case IW_MODE_INFRA:
8750 break;
8751 case IW_MODE_AUTO:
8752 wrqu->mode = IW_MODE_INFRA;
8753 break;
8754 default:
8755 return -EINVAL;
8757 if (wrqu->mode == priv->ieee->iw_mode)
8758 return 0;
8760 mutex_lock(&priv->mutex);
8762 ipw_sw_reset(priv, 0);
8764 #ifdef CONFIG_IPW2200_MONITOR
8765 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8766 priv->net_dev->type = ARPHRD_ETHER;
8768 if (wrqu->mode == IW_MODE_MONITOR)
8769 #ifdef CONFIG_IPW2200_RADIOTAP
8770 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8771 #else
8772 priv->net_dev->type = ARPHRD_IEEE80211;
8773 #endif
8774 #endif /* CONFIG_IPW2200_MONITOR */
8776 /* Free the existing firmware and reset the fw_loaded
8777 * flag so ipw_load() will bring in the new firmware */
8778 free_firmware();
8780 priv->ieee->iw_mode = wrqu->mode;
8782 queue_work(priv->workqueue, &priv->adapter_restart);
8783 mutex_unlock(&priv->mutex);
8784 return err;
8787 static int ipw_wx_get_mode(struct net_device *dev,
8788 struct iw_request_info *info,
8789 union iwreq_data *wrqu, char *extra)
8791 struct ipw_priv *priv = ieee80211_priv(dev);
8792 mutex_lock(&priv->mutex);
8793 wrqu->mode = priv->ieee->iw_mode;
8794 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8795 mutex_unlock(&priv->mutex);
8796 return 0;
8799 /* Values are in microseconds */
8800 static const s32 timeout_duration[] = {
8801 350000,
8802 250000,
8803 75000,
8804 37000,
8805 25000,
8808 static const s32 period_duration[] = {
8809 400000,
8810 700000,
8811 1000000,
8812 1000000,
8813 1000000
8816 static int ipw_wx_get_range(struct net_device *dev,
8817 struct iw_request_info *info,
8818 union iwreq_data *wrqu, char *extra)
8820 struct ipw_priv *priv = ieee80211_priv(dev);
8821 struct iw_range *range = (struct iw_range *)extra;
8822 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8823 int i = 0, j;
8825 wrqu->data.length = sizeof(*range);
8826 memset(range, 0, sizeof(*range));
8828 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8829 range->throughput = 27 * 1000 * 1000;
8831 range->max_qual.qual = 100;
8832 /* TODO: Find real max RSSI and stick here */
8833 range->max_qual.level = 0;
8834 range->max_qual.noise = 0;
8835 range->max_qual.updated = 7; /* Updated all three */
8837 range->avg_qual.qual = 70;
8838 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8839 range->avg_qual.level = 0; /* FIXME to real average level */
8840 range->avg_qual.noise = 0;
8841 range->avg_qual.updated = 7; /* Updated all three */
8842 mutex_lock(&priv->mutex);
8843 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8845 for (i = 0; i < range->num_bitrates; i++)
8846 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8847 500000;
8849 range->max_rts = DEFAULT_RTS_THRESHOLD;
8850 range->min_frag = MIN_FRAG_THRESHOLD;
8851 range->max_frag = MAX_FRAG_THRESHOLD;
8853 range->encoding_size[0] = 5;
8854 range->encoding_size[1] = 13;
8855 range->num_encoding_sizes = 2;
8856 range->max_encoding_tokens = WEP_KEYS;
8858 /* Set the Wireless Extension versions */
8859 range->we_version_compiled = WIRELESS_EXT;
8860 range->we_version_source = 18;
8862 i = 0;
8863 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8864 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8865 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8866 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8867 continue;
8869 range->freq[i].i = geo->bg[j].channel;
8870 range->freq[i].m = geo->bg[j].freq * 100000;
8871 range->freq[i].e = 1;
8872 i++;
8876 if (priv->ieee->mode & IEEE_A) {
8877 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8878 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8879 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8880 continue;
8882 range->freq[i].i = geo->a[j].channel;
8883 range->freq[i].m = geo->a[j].freq * 100000;
8884 range->freq[i].e = 1;
8885 i++;
8889 range->num_channels = i;
8890 range->num_frequency = i;
8892 mutex_unlock(&priv->mutex);
8894 /* Event capability (kernel + driver) */
8895 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8896 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8897 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8898 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8899 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8901 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8902 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8904 IPW_DEBUG_WX("GET Range\n");
8905 return 0;
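/* Note on the bitrate list above: each supported-rates octet follows the
 * 802.11 encoding, where the top bit marks a basic rate and the low seven
 * bits give the rate in 500 kb/s units, hence
 * bitrate[i] = (supported_rates[i] & 0x7F) * 500000.  An octet of 0x96
 * (basic 11 Mb/s), for example, yields (0x96 & 0x7F) = 22, i.e.
 * 22 * 500000 = 11000000 bit/s. */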
8908 static int ipw_wx_set_wap(struct net_device *dev,
8909 struct iw_request_info *info,
8910 union iwreq_data *wrqu, char *extra)
8912 struct ipw_priv *priv = ieee80211_priv(dev);
8913 DECLARE_MAC_BUF(mac);
8915 static const unsigned char any[] = {
8916 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8918 static const unsigned char off[] = {
8919 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8922 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8923 return -EINVAL;
8924 mutex_lock(&priv->mutex);
8925 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8926 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8927 /* we disable mandatory BSSID association */
8928 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8929 priv->config &= ~CFG_STATIC_BSSID;
8930 IPW_DEBUG_ASSOC("Attempting to associate with new "
8931 "parameters.\n");
8932 ipw_associate(priv);
8933 mutex_unlock(&priv->mutex);
8934 return 0;
8937 priv->config |= CFG_STATIC_BSSID;
8938 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8939 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8940 mutex_unlock(&priv->mutex);
8941 return 0;
8944 IPW_DEBUG_WX("Setting mandatory BSSID to %s\n",
8945 print_mac(mac, wrqu->ap_addr.sa_data));
8947 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8949 /* Network configuration changed -- force [re]association */
8950 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8951 if (!ipw_disassociate(priv))
8952 ipw_associate(priv);
8954 mutex_unlock(&priv->mutex);
8955 return 0;
8958 static int ipw_wx_get_wap(struct net_device *dev,
8959 struct iw_request_info *info,
8960 union iwreq_data *wrqu, char *extra)
8962 struct ipw_priv *priv = ieee80211_priv(dev);
8963 DECLARE_MAC_BUF(mac);
8965 /* If we are associated, trying to associate, or have a statically
8966 * configured BSSID then return that; otherwise return ANY */
8967 mutex_lock(&priv->mutex);
8968 if (priv->config & CFG_STATIC_BSSID ||
8969 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8970 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8971 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8972 } else
8973 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8975 IPW_DEBUG_WX("Getting WAP BSSID: %s\n",
8976 print_mac(mac, wrqu->ap_addr.sa_data));
8977 mutex_unlock(&priv->mutex);
8978 return 0;
8981 static int ipw_wx_set_essid(struct net_device *dev,
8982 struct iw_request_info *info,
8983 union iwreq_data *wrqu, char *extra)
8985 struct ipw_priv *priv = ieee80211_priv(dev);
8986 int length;
8988 mutex_lock(&priv->mutex);
8990 if (!wrqu->essid.flags)
8992 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8993 ipw_disassociate(priv);
8994 priv->config &= ~CFG_STATIC_ESSID;
8995 ipw_associate(priv);
8996 mutex_unlock(&priv->mutex);
8997 return 0;
9000 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9002 priv->config |= CFG_STATIC_ESSID;
9004 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9005 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9006 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9007 mutex_unlock(&priv->mutex);
9008 return 0;
9011 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
9012 length);
9014 priv->essid_len = length;
9015 memcpy(priv->essid, extra, priv->essid_len);
9017 /* Network configuration changed -- force [re]association */
9018 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9019 if (!ipw_disassociate(priv))
9020 ipw_associate(priv);
9022 mutex_unlock(&priv->mutex);
9023 return 0;
9026 static int ipw_wx_get_essid(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *extra)
9030 struct ipw_priv *priv = ieee80211_priv(dev);
9032 /* If we are associated, trying to associate, or have a statically
9033 * configured ESSID then return that; otherwise return ANY */
9034 mutex_lock(&priv->mutex);
9035 if (priv->config & CFG_STATIC_ESSID ||
9036 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9037 IPW_DEBUG_WX("Getting essid: '%s'\n",
9038 escape_essid(priv->essid, priv->essid_len));
9039 memcpy(extra, priv->essid, priv->essid_len);
9040 wrqu->essid.length = priv->essid_len;
9041 wrqu->essid.flags = 1; /* active */
9042 } else {
9043 IPW_DEBUG_WX("Getting essid: ANY\n");
9044 wrqu->essid.length = 0;
9045 wrqu->essid.flags = 0; /* active */
9047 mutex_unlock(&priv->mutex);
9048 return 0;
9051 static int ipw_wx_set_nick(struct net_device *dev,
9052 struct iw_request_info *info,
9053 union iwreq_data *wrqu, char *extra)
9055 struct ipw_priv *priv = ieee80211_priv(dev);
9057 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9058 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9059 return -E2BIG;
9060 mutex_lock(&priv->mutex);
9061 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9062 memset(priv->nick, 0, sizeof(priv->nick));
9063 memcpy(priv->nick, extra, wrqu->data.length);
9064 IPW_DEBUG_TRACE("<<\n");
9065 mutex_unlock(&priv->mutex);
9066 return 0;
9070 static int ipw_wx_get_nick(struct net_device *dev,
9071 struct iw_request_info *info,
9072 union iwreq_data *wrqu, char *extra)
9074 struct ipw_priv *priv = ieee80211_priv(dev);
9075 IPW_DEBUG_WX("Getting nick\n");
9076 mutex_lock(&priv->mutex);
9077 wrqu->data.length = strlen(priv->nick);
9078 memcpy(extra, priv->nick, wrqu->data.length);
9079 wrqu->data.flags = 1; /* active */
9080 mutex_unlock(&priv->mutex);
9081 return 0;
9084 static int ipw_wx_set_sens(struct net_device *dev,
9085 struct iw_request_info *info,
9086 union iwreq_data *wrqu, char *extra)
9088 struct ipw_priv *priv = ieee80211_priv(dev);
9089 int err = 0;
9091 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9092 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9093 mutex_lock(&priv->mutex);
9095 if (wrqu->sens.fixed == 0)
9097 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9098 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9099 goto out;
9101 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9102 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9103 err = -EINVAL;
9104 goto out;
9107 priv->roaming_threshold = wrqu->sens.value;
9108 priv->disassociate_threshold = 3*wrqu->sens.value;
9109 out:
9110 mutex_unlock(&priv->mutex);
9111 return err;
9114 static int ipw_wx_get_sens(struct net_device *dev,
9115 struct iw_request_info *info,
9116 union iwreq_data *wrqu, char *extra)
9118 struct ipw_priv *priv = ieee80211_priv(dev);
9119 mutex_lock(&priv->mutex);
9120 wrqu->sens.fixed = 1;
9121 wrqu->sens.value = priv->roaming_threshold;
9122 mutex_unlock(&priv->mutex);
9124 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9125 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9127 return 0;
9130 static int ipw_wx_set_rate(struct net_device *dev,
9131 struct iw_request_info *info,
9132 union iwreq_data *wrqu, char *extra)
9134 /* TODO: We should use semaphores or locks for access to priv */
9135 struct ipw_priv *priv = ieee80211_priv(dev);
9136 u32 target_rate = wrqu->bitrate.value;
9137 u32 fixed, mask;
9139 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9140 /* value = X, fixed = 1 means only rate X */
9141 /* value = X, fixed = 0 means all rates lower than or equal to X */
9143 if (target_rate == -1) {
9144 fixed = 0;
9145 mask = IEEE80211_DEFAULT_RATES_MASK;
9146 /* Now we should reassociate */
9147 goto apply;
9150 mask = 0;
9151 fixed = wrqu->bitrate.fixed;
9153 if (target_rate == 1000000 || !fixed)
9154 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9155 if (target_rate == 1000000)
9156 goto apply;
9158 if (target_rate == 2000000 || !fixed)
9159 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9160 if (target_rate == 2000000)
9161 goto apply;
9163 if (target_rate == 5500000 || !fixed)
9164 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9165 if (target_rate == 5500000)
9166 goto apply;
9168 if (target_rate == 6000000 || !fixed)
9169 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9170 if (target_rate == 6000000)
9171 goto apply;
9173 if (target_rate == 9000000 || !fixed)
9174 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9175 if (target_rate == 9000000)
9176 goto apply;
9178 if (target_rate == 11000000 || !fixed)
9179 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9180 if (target_rate == 11000000)
9181 goto apply;
9183 if (target_rate == 12000000 || !fixed)
9184 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9185 if (target_rate == 12000000)
9186 goto apply;
9188 if (target_rate == 18000000 || !fixed)
9189 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9190 if (target_rate == 18000000)
9191 goto apply;
9193 if (target_rate == 24000000 || !fixed)
9194 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9195 if (target_rate == 24000000)
9196 goto apply;
9198 if (target_rate == 36000000 || !fixed)
9199 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9200 if (target_rate == 36000000)
9201 goto apply;
9203 if (target_rate == 48000000 || !fixed)
9204 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9205 if (target_rate == 48000000)
9206 goto apply;
9208 if (target_rate == 54000000 || !fixed)
9209 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9210 if (target_rate == 54000000)
9211 goto apply;
9213 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9214 return -EINVAL;
9216 apply:
9217 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9218 mask, fixed ? "fixed" : "sub-rates");
9219 mutex_lock(&priv->mutex);
9220 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9221 priv->config &= ~CFG_FIXED_RATE;
9222 ipw_set_fixed_rate(priv, priv->ieee->mode);
9223 } else
9224 priv->config |= CFG_FIXED_RATE;
9226 if (priv->rates_mask == mask) {
9227 IPW_DEBUG_WX("Mask set to current mask.\n");
9228 mutex_unlock(&priv->mutex);
9229 return 0;
9232 priv->rates_mask = mask;
9234 /* Network configuration changed -- force [re]association */
9235 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9236 if (!ipw_disassociate(priv))
9237 ipw_associate(priv);
9239 mutex_unlock(&priv->mutex);
9240 return 0;
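/* Note on the rate-mask construction above: the requested rate is always
 * included and, when 'fixed' is zero, every lower rate as well.  A request
 * for 11 Mb/s with fixed == 0 therefore ends up with
 *
 *	mask = IEEE80211_CCK_RATE_1MB_MASK  | IEEE80211_CCK_RATE_2MB_MASK  |
 *	       IEEE80211_CCK_RATE_5MB_MASK  | IEEE80211_OFDM_RATE_6MB_MASK |
 *	       IEEE80211_OFDM_RATE_9MB_MASK | IEEE80211_CCK_RATE_11MB_MASK;
 *
 * while the same request with fixed == 1 sets only
 * IEEE80211_CCK_RATE_11MB_MASK, and a value of -1 restores
 * IEEE80211_DEFAULT_RATES_MASK and clears CFG_FIXED_RATE. */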
9243 static int ipw_wx_get_rate(struct net_device *dev,
9244 struct iw_request_info *info,
9245 union iwreq_data *wrqu, char *extra)
9247 struct ipw_priv *priv = ieee80211_priv(dev);
9248 mutex_lock(&priv->mutex);
9249 wrqu->bitrate.value = priv->last_rate;
9250 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9251 mutex_unlock(&priv->mutex);
9252 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9253 return 0;
9256 static int ipw_wx_set_rts(struct net_device *dev,
9257 struct iw_request_info *info,
9258 union iwreq_data *wrqu, char *extra)
9260 struct ipw_priv *priv = ieee80211_priv(dev);
9261 mutex_lock(&priv->mutex);
9262 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9263 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9264 else {
9265 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9266 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9267 mutex_unlock(&priv->mutex);
9268 return -EINVAL;
9270 priv->rts_threshold = wrqu->rts.value;
9273 ipw_send_rts_threshold(priv, priv->rts_threshold);
9274 mutex_unlock(&priv->mutex);
9275 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9276 return 0;
9279 static int ipw_wx_get_rts(struct net_device *dev,
9280 struct iw_request_info *info,
9281 union iwreq_data *wrqu, char *extra)
9283 struct ipw_priv *priv = ieee80211_priv(dev);
9284 mutex_lock(&priv->mutex);
9285 wrqu->rts.value = priv->rts_threshold;
9286 wrqu->rts.fixed = 0; /* no auto select */
9287 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9288 mutex_unlock(&priv->mutex);
9289 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9290 return 0;
9293 static int ipw_wx_set_txpow(struct net_device *dev,
9294 struct iw_request_info *info,
9295 union iwreq_data *wrqu, char *extra)
9297 struct ipw_priv *priv = ieee80211_priv(dev);
9298 int err = 0;
9300 mutex_lock(&priv->mutex);
9301 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9302 err = -EINPROGRESS;
9303 goto out;
9306 if (!wrqu->power.fixed)
9307 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9309 if (wrqu->power.flags != IW_TXPOW_DBM) {
9310 err = -EINVAL;
9311 goto out;
9314 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9315 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9316 err = -EINVAL;
9317 goto out;
9320 priv->tx_power = wrqu->power.value;
9321 err = ipw_set_tx_power(priv);
9322 out:
9323 mutex_unlock(&priv->mutex);
9324 return err;
9327 static int ipw_wx_get_txpow(struct net_device *dev,
9328 struct iw_request_info *info,
9329 union iwreq_data *wrqu, char *extra)
9331 struct ipw_priv *priv = ieee80211_priv(dev);
9332 mutex_lock(&priv->mutex);
9333 wrqu->power.value = priv->tx_power;
9334 wrqu->power.fixed = 1;
9335 wrqu->power.flags = IW_TXPOW_DBM;
9336 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9337 mutex_unlock(&priv->mutex);
9339 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9340 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9342 return 0;
9345 static int ipw_wx_set_frag(struct net_device *dev,
9346 struct iw_request_info *info,
9347 union iwreq_data *wrqu, char *extra)
9349 struct ipw_priv *priv = ieee80211_priv(dev);
9350 mutex_lock(&priv->mutex);
9351 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9352 priv->ieee->fts = DEFAULT_FTS;
9353 else {
9354 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9355 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9356 mutex_unlock(&priv->mutex);
9357 return -EINVAL;
9360 priv->ieee->fts = wrqu->frag.value & ~0x1;
9363 ipw_send_frag_threshold(priv, wrqu->frag.value);
9364 mutex_unlock(&priv->mutex);
9365 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9366 return 0;
9369 static int ipw_wx_get_frag(struct net_device *dev,
9370 struct iw_request_info *info,
9371 union iwreq_data *wrqu, char *extra)
9373 struct ipw_priv *priv = ieee80211_priv(dev);
9374 mutex_lock(&priv->mutex);
9375 wrqu->frag.value = priv->ieee->fts;
9376 wrqu->frag.fixed = 0; /* no auto select */
9377 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9378 mutex_unlock(&priv->mutex);
9379 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9381 return 0;
9384 static int ipw_wx_set_retry(struct net_device *dev,
9385 struct iw_request_info *info,
9386 union iwreq_data *wrqu, char *extra)
9388 struct ipw_priv *priv = ieee80211_priv(dev);
9390 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9391 return -EINVAL;
9393 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9394 return 0;
9396 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9397 return -EINVAL;
9399 mutex_lock(&priv->mutex);
9400 if (wrqu->retry.flags & IW_RETRY_SHORT)
9401 priv->short_retry_limit = (u8) wrqu->retry.value;
9402 else if (wrqu->retry.flags & IW_RETRY_LONG)
9403 priv->long_retry_limit = (u8) wrqu->retry.value;
9404 else {
9405 priv->short_retry_limit = (u8) wrqu->retry.value;
9406 priv->long_retry_limit = (u8) wrqu->retry.value;
9409 ipw_send_retry_limit(priv, priv->short_retry_limit,
9410 priv->long_retry_limit);
9411 mutex_unlock(&priv->mutex);
9412 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9413 priv->short_retry_limit, priv->long_retry_limit);
9414 return 0;
9417 static int ipw_wx_get_retry(struct net_device *dev,
9418 struct iw_request_info *info,
9419 union iwreq_data *wrqu, char *extra)
9421 struct ipw_priv *priv = ieee80211_priv(dev);
9423 mutex_lock(&priv->mutex);
9424 wrqu->retry.disabled = 0;
9426 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9427 mutex_unlock(&priv->mutex);
9428 return -EINVAL;
9431 if (wrqu->retry.flags & IW_RETRY_LONG) {
9432 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9433 wrqu->retry.value = priv->long_retry_limit;
9434 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9435 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9436 wrqu->retry.value = priv->short_retry_limit;
9437 } else {
9438 wrqu->retry.flags = IW_RETRY_LIMIT;
9439 wrqu->retry.value = priv->short_retry_limit;
9441 mutex_unlock(&priv->mutex);
9443 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9445 return 0;
9448 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9449 int essid_len)
9451 struct ipw_scan_request_ext scan;
9452 int err = 0, scan_type;
9454 if (!(priv->status & STATUS_INIT) ||
9455 (priv->status & STATUS_EXIT_PENDING))
9456 return 0;
9458 mutex_lock(&priv->mutex);
9460 if (priv->status & STATUS_RF_KILL_MASK) {
9461 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9462 priv->status |= STATUS_SCAN_PENDING;
9463 goto done;
9466 IPW_DEBUG_HC("starting request direct scan!\n");
9468 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9469 /* We should not sleep here; otherwise we will block most
9470 * of the system (for instance, we hold rtnl_lock when we
9471 * get here).
9473 err = -EAGAIN;
9474 goto done;
9476 memset(&scan, 0, sizeof(scan));
9478 if (priv->config & CFG_SPEED_SCAN)
9479 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9480 cpu_to_le16(30);
9481 else
9482 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9483 cpu_to_le16(20);
9485 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9486 cpu_to_le16(20);
9487 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9488 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9490 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9492 err = ipw_send_ssid(priv, essid, essid_len);
9493 if (err) {
9494 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9495 goto done;
9497 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9499 ipw_add_scan_channels(priv, &scan, scan_type);
9501 err = ipw_send_scan_request_ext(priv, &scan);
9502 if (err) {
9503 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9504 goto done;
9507 priv->status |= STATUS_SCANNING;
9509 done:
9510 mutex_unlock(&priv->mutex);
9511 return err;
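/* Note on ipw_request_direct_scan(): if RF-kill is active the scan is
 * only marked STATUS_SCAN_PENDING and deferred, and if a scan is already
 * running the call fails with -EAGAIN rather than sleeping (the caller
 * may hold rtnl_lock).  Otherwise the active-broadcast dwell time is
 * shortened when CFG_SPEED_SCAN is set (30 vs. 20), the passive
 * full-dwell phase uses 120, the SSID is sent with ipw_send_ssid(), and
 * the request goes out as an IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN. */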
9514 static int ipw_wx_set_scan(struct net_device *dev,
9515 struct iw_request_info *info,
9516 union iwreq_data *wrqu, char *extra)
9518 struct ipw_priv *priv = ieee80211_priv(dev);
9519 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9521 mutex_lock(&priv->mutex);
9522 priv->user_requested_scan = 1;
9523 mutex_unlock(&priv->mutex);
9525 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9526 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9527 ipw_request_direct_scan(priv, req->essid,
9528 req->essid_len);
9529 return 0;
9531 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9532 queue_work(priv->workqueue,
9533 &priv->request_passive_scan);
9534 return 0;
9538 IPW_DEBUG_WX("Start scan\n");
9540 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9542 return 0;
9545 static int ipw_wx_get_scan(struct net_device *dev,
9546 struct iw_request_info *info,
9547 union iwreq_data *wrqu, char *extra)
9549 struct ipw_priv *priv = ieee80211_priv(dev);
9550 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9553 static int ipw_wx_set_encode(struct net_device *dev,
9554 struct iw_request_info *info,
9555 union iwreq_data *wrqu, char *key)
9557 struct ipw_priv *priv = ieee80211_priv(dev);
9558 int ret;
9559 u32 cap = priv->capability;
9561 mutex_lock(&priv->mutex);
9562 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9564 /* In IBSS mode, we need to notify the firmware to update
9565 * the beacon info after we changed the capability. */
9566 if (cap != priv->capability &&
9567 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9568 priv->status & STATUS_ASSOCIATED)
9569 ipw_disassociate(priv);
9571 mutex_unlock(&priv->mutex);
9572 return ret;
9575 static int ipw_wx_get_encode(struct net_device *dev,
9576 struct iw_request_info *info,
9577 union iwreq_data *wrqu, char *key)
9579 struct ipw_priv *priv = ieee80211_priv(dev);
9580 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9583 static int ipw_wx_set_power(struct net_device *dev,
9584 struct iw_request_info *info,
9585 union iwreq_data *wrqu, char *extra)
9587 struct ipw_priv *priv = ieee80211_priv(dev);
9588 int err;
9589 mutex_lock(&priv->mutex);
9590 if (wrqu->power.disabled) {
9591 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9592 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9593 if (err) {
9594 IPW_DEBUG_WX("failed setting power mode.\n");
9595 mutex_unlock(&priv->mutex);
9596 return err;
9598 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9599 mutex_unlock(&priv->mutex);
9600 return 0;
9603 switch (wrqu->power.flags & IW_POWER_MODE) {
9604 case IW_POWER_ON: /* If not specified */
9605 case IW_POWER_MODE: /* If set all mask */
9606 case IW_POWER_ALL_R: /* If explicitly stated all */
9607 break;
9608 default: /* Otherwise we don't support it */
9609 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9610 wrqu->power.flags);
9611 mutex_unlock(&priv->mutex);
9612 return -EOPNOTSUPP;
9615 /* If the user hasn't specified a power management mode yet, default
9616 * to BATTERY */
9617 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9618 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9619 else
9620 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9622 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9623 if (err) {
9624 IPW_DEBUG_WX("failed setting power mode.\n");
9625 mutex_unlock(&priv->mutex);
9626 return err;
9629 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9630 mutex_unlock(&priv->mutex);
9631 return 0;
9634 static int ipw_wx_get_power(struct net_device *dev,
9635 struct iw_request_info *info,
9636 union iwreq_data *wrqu, char *extra)
9638 struct ipw_priv *priv = ieee80211_priv(dev);
9639 mutex_lock(&priv->mutex);
9640 if (!(priv->power_mode & IPW_POWER_ENABLED))
9641 wrqu->power.disabled = 1;
9642 else
9643 wrqu->power.disabled = 0;
9645 mutex_unlock(&priv->mutex);
9646 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9648 return 0;
9651 static int ipw_wx_set_powermode(struct net_device *dev,
9652 struct iw_request_info *info,
9653 union iwreq_data *wrqu, char *extra)
9655 struct ipw_priv *priv = ieee80211_priv(dev);
9656 int mode = *(int *)extra;
9657 int err;
9659 mutex_lock(&priv->mutex);
9660 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9661 mode = IPW_POWER_AC;
9663 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9664 err = ipw_send_power_mode(priv, mode);
9665 if (err) {
9666 IPW_DEBUG_WX("failed setting power mode.\n");
9667 mutex_unlock(&priv->mutex);
9668 return err;
9670 priv->power_mode = IPW_POWER_ENABLED | mode;
9672 mutex_unlock(&priv->mutex);
9673 return 0;
9676 #define MAX_WX_STRING 80
9677 static int ipw_wx_get_powermode(struct net_device *dev,
9678 struct iw_request_info *info,
9679 union iwreq_data *wrqu, char *extra)
9681 struct ipw_priv *priv = ieee80211_priv(dev);
9682 int level = IPW_POWER_LEVEL(priv->power_mode);
9683 char *p = extra;
9685 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9687 switch (level) {
9688 case IPW_POWER_AC:
9689 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9690 break;
9691 case IPW_POWER_BATTERY:
9692 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9693 break;
9694 default:
9695 p += snprintf(p, MAX_WX_STRING - (p - extra),
9696 "(Timeout %dms, Period %dms)",
9697 timeout_duration[level - 1] / 1000,
9698 period_duration[level - 1] / 1000);
9701 if (!(priv->power_mode & IPW_POWER_ENABLED))
9702 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9704 wrqu->data.length = p - extra + 1;
9706 return 0;
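/* Note on the power-save reporting above: levels other than IPW_POWER_AC
 * and IPW_POWER_BATTERY index the timeout_duration[] and period_duration[]
 * tables (microseconds) at level - 1, so a level of 3, for instance, is
 * printed as "(Timeout 75ms, Period 1000ms)", assuming (as the indexing
 * suggests) that the numbered levels run 1..5 below the AC/BATTERY codes. */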
9709 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9710 struct iw_request_info *info,
9711 union iwreq_data *wrqu, char *extra)
9713 struct ipw_priv *priv = ieee80211_priv(dev);
9714 int mode = *(int *)extra;
9715 u8 band = 0, modulation = 0;
9717 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9718 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9719 return -EINVAL;
9721 mutex_lock(&priv->mutex);
9722 if (priv->adapter == IPW_2915ABG) {
9723 priv->ieee->abg_true = 1;
9724 if (mode & IEEE_A) {
9725 band |= IEEE80211_52GHZ_BAND;
9726 modulation |= IEEE80211_OFDM_MODULATION;
9727 } else
9728 priv->ieee->abg_true = 0;
9729 } else {
9730 if (mode & IEEE_A) {
9731 IPW_WARNING("Attempt to set 2200BG into "
9732 "802.11a mode\n");
9733 mutex_unlock(&priv->mutex);
9734 return -EINVAL;
9737 priv->ieee->abg_true = 0;
9740 if (mode & IEEE_B) {
9741 band |= IEEE80211_24GHZ_BAND;
9742 modulation |= IEEE80211_CCK_MODULATION;
9743 } else
9744 priv->ieee->abg_true = 0;
9746 if (mode & IEEE_G) {
9747 band |= IEEE80211_24GHZ_BAND;
9748 modulation |= IEEE80211_OFDM_MODULATION;
9749 } else
9750 priv->ieee->abg_true = 0;
9752 priv->ieee->mode = mode;
9753 priv->ieee->freq_band = band;
9754 priv->ieee->modulation = modulation;
9755 init_supported_rates(priv, &priv->rates);
9757 /* Network configuration changed -- force [re]association */
9758 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9759 if (!ipw_disassociate(priv)) {
9760 ipw_send_supported_rates(priv, &priv->rates);
9761 ipw_associate(priv);
9764 /* Update the band LEDs */
9765 ipw_led_band_on(priv);
9767 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9768 mode & IEEE_A ? 'a' : '.',
9769 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9770 mutex_unlock(&priv->mutex);
9771 return 0;
9774 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9775 struct iw_request_info *info,
9776 union iwreq_data *wrqu, char *extra)
9778 struct ipw_priv *priv = ieee80211_priv(dev);
9779 mutex_lock(&priv->mutex);
9780 switch (priv->ieee->mode) {
9781 case IEEE_A:
9782 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9783 break;
9784 case IEEE_B:
9785 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9786 break;
9787 case IEEE_A | IEEE_B:
9788 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9789 break;
9790 case IEEE_G:
9791 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9792 break;
9793 case IEEE_A | IEEE_G:
9794 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9795 break;
9796 case IEEE_B | IEEE_G:
9797 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9798 break;
9799 case IEEE_A | IEEE_B | IEEE_G:
9800 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9801 break;
9802 default:
9803 strncpy(extra, "unknown", MAX_WX_STRING);
9804 break;
9807 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9809 wrqu->data.length = strlen(extra) + 1;
9810 mutex_unlock(&priv->mutex);
9812 return 0;
9815 static int ipw_wx_set_preamble(struct net_device *dev,
9816 struct iw_request_info *info,
9817 union iwreq_data *wrqu, char *extra)
9819 struct ipw_priv *priv = ieee80211_priv(dev);
9820 int mode = *(int *)extra;
9821 mutex_lock(&priv->mutex);
9822 /* Switching from SHORT -> LONG requires a disassociation */
9823 if (mode == 1) {
9824 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9825 priv->config |= CFG_PREAMBLE_LONG;
9827 /* Network configuration changed -- force [re]association */
9828 IPW_DEBUG_ASSOC
9829 ("[re]association triggered due to preamble change.\n");
9830 if (!ipw_disassociate(priv))
9831 ipw_associate(priv);
9833 goto done;
9836 if (mode == 0) {
9837 priv->config &= ~CFG_PREAMBLE_LONG;
9838 goto done;
9840 mutex_unlock(&priv->mutex);
9841 return -EINVAL;
9843 done:
9844 mutex_unlock(&priv->mutex);
9845 return 0;
9848 static int ipw_wx_get_preamble(struct net_device *dev,
9849 struct iw_request_info *info,
9850 union iwreq_data *wrqu, char *extra)
9852 struct ipw_priv *priv = ieee80211_priv(dev);
9853 mutex_lock(&priv->mutex);
9854 if (priv->config & CFG_PREAMBLE_LONG)
9855 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9856 else
9857 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9858 mutex_unlock(&priv->mutex);
9859 return 0;
9862 #ifdef CONFIG_IPW2200_MONITOR
9863 static int ipw_wx_set_monitor(struct net_device *dev,
9864 struct iw_request_info *info,
9865 union iwreq_data *wrqu, char *extra)
9867 struct ipw_priv *priv = ieee80211_priv(dev);
9868 int *parms = (int *)extra;
9869 int enable = (parms[0] > 0);
9870 mutex_lock(&priv->mutex);
9871 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9872 if (enable) {
9873 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9874 #ifdef CONFIG_IPW2200_RADIOTAP
9875 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9876 #else
9877 priv->net_dev->type = ARPHRD_IEEE80211;
9878 #endif
9879 queue_work(priv->workqueue, &priv->adapter_restart);
9882 ipw_set_channel(priv, parms[1]);
9883 } else {
9884 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9885 mutex_unlock(&priv->mutex);
9886 return 0;
9888 priv->net_dev->type = ARPHRD_ETHER;
9889 queue_work(priv->workqueue, &priv->adapter_restart);
9891 mutex_unlock(&priv->mutex);
9892 return 0;
9895 #endif /* CONFIG_IPW2200_MONITOR */
9897 static int ipw_wx_reset(struct net_device *dev,
9898 struct iw_request_info *info,
9899 union iwreq_data *wrqu, char *extra)
9901 struct ipw_priv *priv = ieee80211_priv(dev);
9902 IPW_DEBUG_WX("RESET\n");
9903 queue_work(priv->workqueue, &priv->adapter_restart);
9904 return 0;
9907 static int ipw_wx_sw_reset(struct net_device *dev,
9908 struct iw_request_info *info,
9909 union iwreq_data *wrqu, char *extra)
9911 struct ipw_priv *priv = ieee80211_priv(dev);
9912 union iwreq_data wrqu_sec = {
9913 .encoding = {
9914 .flags = IW_ENCODE_DISABLED,
9917 int ret;
9919 IPW_DEBUG_WX("SW_RESET\n");
9921 mutex_lock(&priv->mutex);
9923 ret = ipw_sw_reset(priv, 2);
9924 if (!ret) {
9925 free_firmware();
9926 ipw_adapter_restart(priv);
9929 /* The SW reset bit might have been toggled on by the 'disable'
9930 * module parameter, so take appropriate action */
9931 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9933 mutex_unlock(&priv->mutex);
9934 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9935 mutex_lock(&priv->mutex);
9937 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9938 /* Configuration likely changed -- force [re]association */
9939 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9940 "reset.\n");
9941 if (!ipw_disassociate(priv))
9942 ipw_associate(priv);
9945 mutex_unlock(&priv->mutex);
9947 return 0;
9950 /* Rebase the WE IOCTLs to zero for the handler array */
9951 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
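/* Illustrative expansion (not driver code): with the standard Wireless
 * Extensions numbering (SIOCSIWCOMMIT 0x8B00, SIOCGIWNAME 0x8B01),
 * IW_IOCTL(SIOCGIWNAME) expands to [0x8B01 - 0x8B00] = [1], placing
 * ipw_wx_get_name at index 1 of the table below; every handler is rebased
 * the same way so the array can start at SIOCSIWCOMMIT. */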
9952 static iw_handler ipw_wx_handlers[] = {
9953 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9954 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9955 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9956 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9957 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9958 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9959 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9960 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9961 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9962 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9963 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9964 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9965 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9966 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9967 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9968 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9969 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9970 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9971 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9972 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9973 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9974 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9975 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9976 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9977 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9978 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9979 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9980 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9981 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9982 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9983 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9984 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9985 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9986 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9987 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9988 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9989 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9990 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9991 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9992 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9993 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9996 enum {
9997 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9998 IPW_PRIV_GET_POWER,
9999 IPW_PRIV_SET_MODE,
10000 IPW_PRIV_GET_MODE,
10001 IPW_PRIV_SET_PREAMBLE,
10002 IPW_PRIV_GET_PREAMBLE,
10003 IPW_PRIV_RESET,
10004 IPW_PRIV_SW_RESET,
10005 #ifdef CONFIG_IPW2200_MONITOR
10006 IPW_PRIV_SET_MONITOR,
10007 #endif
10010 static struct iw_priv_args ipw_priv_args[] = {
10012 .cmd = IPW_PRIV_SET_POWER,
10013 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10014 .name = "set_power"},
10016 .cmd = IPW_PRIV_GET_POWER,
10017 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10018 .name = "get_power"},
10020 .cmd = IPW_PRIV_SET_MODE,
10021 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10022 .name = "set_mode"},
10024 .cmd = IPW_PRIV_GET_MODE,
10025 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10026 .name = "get_mode"},
10028 .cmd = IPW_PRIV_SET_PREAMBLE,
10029 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10030 .name = "set_preamble"},
10032 .cmd = IPW_PRIV_GET_PREAMBLE,
10033 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10034 .name = "get_preamble"},
10036 IPW_PRIV_RESET,
10037 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10039 IPW_PRIV_SW_RESET,
10040 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10041 #ifdef CONFIG_IPW2200_MONITOR
10043 IPW_PRIV_SET_MONITOR,
10044 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10045 #endif /* CONFIG_IPW2200_MONITOR */
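/* Usage sketch (illustrative only; "eth1" is an assumed interface name).
 * The private commands above are driven from user space with iwpriv, e.g.:
 *
 *   iwpriv eth1 set_mode 6      # 802.11bg, matching the "(6)" code in get_mode
 *   iwpriv eth1 get_preamble    # reports "long (1)" or "auto (0)"
 *   iwpriv eth1 sw_reset        # takes no arguments (IW_PRIV_SIZE_FIXED | 0)
 *   iwpriv eth1 monitor 1 6     # monitor builds only: enable on channel 6
 *
 * The iw_priv_args descriptors above tell iwpriv how many arguments each
 * command expects and how to format the replies. */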
10048 static iw_handler ipw_priv_handler[] = {
10049 ipw_wx_set_powermode,
10050 ipw_wx_get_powermode,
10051 ipw_wx_set_wireless_mode,
10052 ipw_wx_get_wireless_mode,
10053 ipw_wx_set_preamble,
10054 ipw_wx_get_preamble,
10055 ipw_wx_reset,
10056 ipw_wx_sw_reset,
10057 #ifdef CONFIG_IPW2200_MONITOR
10058 ipw_wx_set_monitor,
10059 #endif
10062 static struct iw_handler_def ipw_wx_handler_def = {
10063 .standard = ipw_wx_handlers,
10064 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10065 .num_private = ARRAY_SIZE(ipw_priv_handler),
10066 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10067 .private = ipw_priv_handler,
10068 .private_args = ipw_priv_args,
10069 .get_wireless_stats = ipw_get_wireless_stats,
10073 * Get wireless statistics.
10074 * Called by /proc/net/wireless
10075 * Also called by SIOCGIWSTATS
10077 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10079 struct ipw_priv *priv = ieee80211_priv(dev);
10080 struct iw_statistics *wstats;
10082 wstats = &priv->wstats;
10084 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10085 * netdev->get_wireless_stats seems to be called before fw is
10086 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10087 * and associated; if not associated, the values are all meaningless
10088 * anyway, so set them all to NULL and INVALID */
10089 if (!(priv->status & STATUS_ASSOCIATED)) {
10090 wstats->miss.beacon = 0;
10091 wstats->discard.retries = 0;
10092 wstats->qual.qual = 0;
10093 wstats->qual.level = 0;
10094 wstats->qual.noise = 0;
10095 wstats->qual.updated = 7;
10096 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10097 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10098 return wstats;
10101 wstats->qual.qual = priv->quality;
10102 wstats->qual.level = priv->exp_avg_rssi;
10103 wstats->qual.noise = priv->exp_avg_noise;
10104 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10105 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10107 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10108 wstats->discard.retries = priv->last_tx_failures;
10109 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10111 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10112 goto fail_get_ordinal;
10113 wstats->discard.retries += tx_retry; */
10115 return wstats;
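/* Note (descriptive): these fields are what user space tools such as
 * iwconfig and /proc/net/wireless report as Link Quality, Signal level and
 * Noise level.  While unassociated, the IW_QUAL_*_INVALID flags set above
 * tell those tools to show the values as unknown rather than as stale data. */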
10118 /* net device stuff */
10120 static void init_sys_config(struct ipw_sys_config *sys_config)
10122 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10123 sys_config->bt_coexistence = 0;
10124 sys_config->answer_broadcast_ssid_probe = 0;
10125 sys_config->accept_all_data_frames = 0;
10126 sys_config->accept_non_directed_frames = 1;
10127 sys_config->exclude_unicast_unencrypted = 0;
10128 sys_config->disable_unicast_decryption = 1;
10129 sys_config->exclude_multicast_unencrypted = 0;
10130 sys_config->disable_multicast_decryption = 1;
10131 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10132 antenna = CFG_SYS_ANTENNA_BOTH;
10133 sys_config->antenna_diversity = antenna;
10134 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10135 sys_config->dot11g_auto_detection = 0;
10136 sys_config->enable_cts_to_self = 0;
10137 sys_config->bt_coexist_collision_thr = 0;
10138 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10139 sys_config->silence_threshold = 0x1e;
10142 static int ipw_net_open(struct net_device *dev)
10144 struct ipw_priv *priv = ieee80211_priv(dev);
10145 IPW_DEBUG_INFO("dev->open\n");
10146 /* we should be verifying the device is ready to be opened */
10147 mutex_lock(&priv->mutex);
10148 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10149 (priv->status & STATUS_ASSOCIATED))
10150 netif_start_queue(dev);
10151 mutex_unlock(&priv->mutex);
10152 return 0;
10155 static int ipw_net_stop(struct net_device *dev)
10157 IPW_DEBUG_INFO("dev->close\n");
10158 netif_stop_queue(dev);
10159 return 0;
10163 todo:
10165 modify to send one TFD per fragment instead of using chunking; otherwise
10166 we need to heavily modify ieee80211_skb_to_txb().
10169 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10170 int pri)
10172 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10173 txb->fragments[0]->data;
10174 int i = 0;
10175 struct tfd_frame *tfd;
10176 #ifdef CONFIG_IPW2200_QOS
10177 int tx_id = ipw_get_tx_queue_number(priv, pri);
10178 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10179 #else
10180 struct clx2_tx_queue *txq = &priv->txq[0];
10181 #endif
10182 struct clx2_queue *q = &txq->q;
10183 u8 id, hdr_len, unicast;
10184 u16 remaining_bytes;
10185 int fc;
10186 DECLARE_MAC_BUF(mac);
10188 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10189 switch (priv->ieee->iw_mode) {
10190 case IW_MODE_ADHOC:
10191 unicast = !is_multicast_ether_addr(hdr->addr1);
10192 id = ipw_find_station(priv, hdr->addr1);
10193 if (id == IPW_INVALID_STATION) {
10194 id = ipw_add_station(priv, hdr->addr1);
10195 if (id == IPW_INVALID_STATION) {
10196 IPW_WARNING("Attempt to send data to "
10197 "invalid cell: %s\n",
10198 print_mac(mac, hdr->addr1));
10199 goto drop;
10202 break;
10204 case IW_MODE_INFRA:
10205 default:
10206 unicast = !is_multicast_ether_addr(hdr->addr3);
10207 id = 0;
10208 break;
10211 tfd = &txq->bd[q->first_empty];
10212 txq->txb[q->first_empty] = txb;
10213 memset(tfd, 0, sizeof(*tfd));
10214 tfd->u.data.station_number = id;
10216 tfd->control_flags.message_type = TX_FRAME_TYPE;
10217 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10219 tfd->u.data.cmd_id = DINO_CMD_TX;
10220 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10221 remaining_bytes = txb->payload_size;
10223 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10224 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10225 else
10226 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10228 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10229 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10231 fc = le16_to_cpu(hdr->frame_ctl);
10232 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10234 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10236 if (likely(unicast))
10237 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10239 if (txb->encrypted && !priv->ieee->host_encrypt) {
10240 switch (priv->ieee->sec.level) {
10241 case SEC_LEVEL_3:
10242 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10243 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10244 /* XXX: ACK flag must be set for CCMP even if it
10245 * is a multicast/broadcast packet, because CCMP
10246 * group communication encrypted by GTK is
10247 * actually done by the AP. */
10248 if (!unicast)
10249 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10251 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10252 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10253 tfd->u.data.key_index = 0;
10254 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10255 break;
10256 case SEC_LEVEL_2:
10257 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10258 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10259 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10260 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10261 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10262 break;
10263 case SEC_LEVEL_1:
10264 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10265 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10266 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10267 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10269 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10270 else
10271 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10272 break;
10273 case SEC_LEVEL_0:
10274 break;
10275 default:
10276 printk(KERN_ERR "Unknown security level %d\n",
10277 priv->ieee->sec.level);
10278 break;
10280 } else
10281 /* No hardware encryption */
10282 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10284 #ifdef CONFIG_IPW2200_QOS
10285 if (fc & IEEE80211_STYPE_QOS_DATA)
10286 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10287 #endif /* CONFIG_IPW2200_QOS */
10289 /* payload */
10290 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10291 txb->nr_frags));
10292 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10293 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10294 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10295 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10296 i, le32_to_cpu(tfd->u.data.num_chunks),
10297 txb->fragments[i]->len - hdr_len);
10298 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10299 i, tfd->u.data.num_chunks,
10300 txb->fragments[i]->len - hdr_len);
10301 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10302 txb->fragments[i]->len - hdr_len);
10304 tfd->u.data.chunk_ptr[i] =
10305 cpu_to_le32(pci_map_single
10306 (priv->pci_dev,
10307 txb->fragments[i]->data + hdr_len,
10308 txb->fragments[i]->len - hdr_len,
10309 PCI_DMA_TODEVICE));
10310 tfd->u.data.chunk_len[i] =
10311 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10314 if (i != txb->nr_frags) {
10315 struct sk_buff *skb;
10316 u16 remaining_bytes = 0;
10317 int j;
10319 for (j = i; j < txb->nr_frags; j++)
10320 remaining_bytes += txb->fragments[j]->len - hdr_len;
10322 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10323 remaining_bytes);
10324 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10325 if (skb != NULL) {
10326 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10327 for (j = i; j < txb->nr_frags; j++) {
10328 int size = txb->fragments[j]->len - hdr_len;
10330 printk(KERN_INFO "Adding frag %d %d...\n",
10331 j, size);
10332 memcpy(skb_put(skb, size),
10333 txb->fragments[j]->data + hdr_len, size);
10335 dev_kfree_skb_any(txb->fragments[i]);
10336 txb->fragments[i] = skb;
10337 tfd->u.data.chunk_ptr[i] =
10338 cpu_to_le32(pci_map_single
10339 (priv->pci_dev, skb->data,
10340 tfd->u.data.chunk_len[i],
10341 PCI_DMA_TODEVICE));
10343 tfd->u.data.num_chunks =
10344 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10349 /* kick DMA */
10350 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10351 ipw_write32(priv, q->reg_w, q->first_empty);
10353 if (ipw_queue_space(q) < q->high_mark)
10354 netif_stop_queue(priv->net_dev);
10356 return NETDEV_TX_OK;
10358 drop:
10359 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10360 ieee80211_txb_free(txb);
10361 return NETDEV_TX_OK;
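/* Worked example of the chunking above (a sketch; assumes NUM_TFD_CHUNKS is
 * 6 as defined in ipw2200.h).  With 7 fragments, num_chunks starts at
 * min(6 - 2, 7) = 4, so fragments 0-3 are DMA-mapped directly.  Because
 * i (4) != nr_frags (7), fragments 4-6 are copied into one freshly allocated
 * skb that becomes chunk 4 and num_chunks is bumped to 5.  This keeps the
 * TFD within its chunk limit at the cost of one extra copy. */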
10364 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10366 struct ipw_priv *priv = ieee80211_priv(dev);
10367 #ifdef CONFIG_IPW2200_QOS
10368 int tx_id = ipw_get_tx_queue_number(priv, pri);
10369 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10370 #else
10371 struct clx2_tx_queue *txq = &priv->txq[0];
10372 #endif /* CONFIG_IPW2200_QOS */
10374 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10375 return 1;
10377 return 0;
10380 #ifdef CONFIG_IPW2200_PROMISCUOUS
10381 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10382 struct ieee80211_txb *txb)
10384 struct ieee80211_rx_stats dummystats;
10385 struct ieee80211_hdr *hdr;
10386 u8 n;
10387 u16 filter = priv->prom_priv->filter;
10388 int hdr_only = 0;
10390 if (filter & IPW_PROM_NO_TX)
10391 return;
10393 memset(&dummystats, 0, sizeof(dummystats));
10395 /* Filtering of fragment chains is done against the first fragment */
10396 hdr = (void *)txb->fragments[0]->data;
10397 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10398 if (filter & IPW_PROM_NO_MGMT)
10399 return;
10400 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10401 hdr_only = 1;
10402 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10403 if (filter & IPW_PROM_NO_CTL)
10404 return;
10405 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10406 hdr_only = 1;
10407 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10408 if (filter & IPW_PROM_NO_DATA)
10409 return;
10410 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10411 hdr_only = 1;
10414 for(n=0; n<txb->nr_frags; ++n) {
10415 struct sk_buff *src = txb->fragments[n];
10416 struct sk_buff *dst;
10417 struct ieee80211_radiotap_header *rt_hdr;
10418 int len;
10420 if (hdr_only) {
10421 hdr = (void *)src->data;
10422 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10423 } else
10424 len = src->len;
10426 dst = alloc_skb(
10427 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10428 if (!dst) continue;
10430 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10432 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10433 rt_hdr->it_pad = 0;
10434 rt_hdr->it_present = 0; /* after all, it's just an idea */
10435 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10437 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10438 ieee80211chan2mhz(priv->channel));
10439 if (priv->channel > 14) /* 802.11a */
10440 *(u16*)skb_put(dst, sizeof(u16)) =
10441 cpu_to_le16(IEEE80211_CHAN_OFDM |
10442 IEEE80211_CHAN_5GHZ);
10443 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10444 *(u16*)skb_put(dst, sizeof(u16)) =
10445 cpu_to_le16(IEEE80211_CHAN_CCK |
10446 IEEE80211_CHAN_2GHZ);
10447 else /* 802.11g */
10448 *(u16*)skb_put(dst, sizeof(u16)) =
10449 cpu_to_le16(IEEE80211_CHAN_OFDM |
10450 IEEE80211_CHAN_2GHZ);
10452 rt_hdr->it_len = dst->len;
10454 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10456 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10457 dev_kfree_skb_any(dst);
10460 #endif
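/* Note (descriptive): each frame mirrored above gets a minimal radiotap
 * header -- version, pad and length plus a present bitmap with only the
 * CHANNEL bit set, followed by the two u16 channel fields (frequency in MHz,
 * then the CCK/OFDM and 2GHz/5GHz flags).  A radiotap-aware sniffer run on
 * the rtap interface created below (e.g. tcpdump -i rtap0) can recover the
 * channel from those fields. */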
10462 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10463 struct net_device *dev, int pri)
10465 struct ipw_priv *priv = ieee80211_priv(dev);
10466 unsigned long flags;
10467 int ret;
10469 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10470 spin_lock_irqsave(&priv->lock, flags);
10472 if (!(priv->status & STATUS_ASSOCIATED)) {
10473 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10474 priv->ieee->stats.tx_carrier_errors++;
10475 netif_stop_queue(dev);
10476 goto fail_unlock;
10479 #ifdef CONFIG_IPW2200_PROMISCUOUS
10480 if (rtap_iface && netif_running(priv->prom_net_dev))
10481 ipw_handle_promiscuous_tx(priv, txb);
10482 #endif
10484 ret = ipw_tx_skb(priv, txb, pri);
10485 if (ret == NETDEV_TX_OK)
10486 __ipw_led_activity_on(priv);
10487 spin_unlock_irqrestore(&priv->lock, flags);
10489 return ret;
10491 fail_unlock:
10492 spin_unlock_irqrestore(&priv->lock, flags);
10493 return 1;
10496 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10498 struct ipw_priv *priv = ieee80211_priv(dev);
10500 priv->ieee->stats.tx_packets = priv->tx_packets;
10501 priv->ieee->stats.rx_packets = priv->rx_packets;
10502 return &priv->ieee->stats;
10505 static void ipw_net_set_multicast_list(struct net_device *dev)
10510 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10512 struct ipw_priv *priv = ieee80211_priv(dev);
10513 struct sockaddr *addr = p;
10514 DECLARE_MAC_BUF(mac);
10516 if (!is_valid_ether_addr(addr->sa_data))
10517 return -EADDRNOTAVAIL;
10518 mutex_lock(&priv->mutex);
10519 priv->config |= CFG_CUSTOM_MAC;
10520 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10521 printk(KERN_INFO "%s: Setting MAC to %s\n",
10522 priv->net_dev->name, print_mac(mac, priv->mac_addr));
10523 queue_work(priv->workqueue, &priv->adapter_restart);
10524 mutex_unlock(&priv->mutex);
10525 return 0;
10528 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10529 struct ethtool_drvinfo *info)
10531 struct ipw_priv *p = ieee80211_priv(dev);
10532 char vers[64];
10533 char date[32];
10534 u32 len;
10536 strcpy(info->driver, DRV_NAME);
10537 strcpy(info->version, DRV_VERSION);
10539 len = sizeof(vers);
10540 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10541 len = sizeof(date);
10542 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10544 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10545 vers, date);
10546 strcpy(info->bus_info, pci_name(p->pci_dev));
10547 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10550 static u32 ipw_ethtool_get_link(struct net_device *dev)
10552 struct ipw_priv *priv = ieee80211_priv(dev);
10553 return (priv->status & STATUS_ASSOCIATED) != 0;
10556 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10558 return IPW_EEPROM_IMAGE_SIZE;
10561 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10562 struct ethtool_eeprom *eeprom, u8 * bytes)
10564 struct ipw_priv *p = ieee80211_priv(dev);
10566 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10567 return -EINVAL;
10568 mutex_lock(&p->mutex);
10569 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10570 mutex_unlock(&p->mutex);
10571 return 0;
10574 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10575 struct ethtool_eeprom *eeprom, u8 * bytes)
10577 struct ipw_priv *p = ieee80211_priv(dev);
10578 int i;
10580 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10581 return -EINVAL;
10582 mutex_lock(&p->mutex);
10583 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10584 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10585 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10586 mutex_unlock(&p->mutex);
10587 return 0;
10590 static const struct ethtool_ops ipw_ethtool_ops = {
10591 .get_link = ipw_ethtool_get_link,
10592 .get_drvinfo = ipw_ethtool_get_drvinfo,
10593 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10594 .get_eeprom = ipw_ethtool_get_eeprom,
10595 .set_eeprom = ipw_ethtool_set_eeprom,
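/* Usage sketch (illustrative; "eth1" is an assumed interface name):
 *
 *   ethtool -i eth1   # driver/version/firmware from ipw_ethtool_get_drvinfo
 *   ethtool -e eth1   # dumps the cached EEPROM image via get_eeprom above
 *   ethtool eth1      # "Link detected" mirrors STATUS_ASSOCIATED (get_link)
 */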
10598 static irqreturn_t ipw_isr(int irq, void *data)
10600 struct ipw_priv *priv = data;
10601 u32 inta, inta_mask;
10603 if (!priv)
10604 return IRQ_NONE;
10606 spin_lock(&priv->irq_lock);
10608 if (!(priv->status & STATUS_INT_ENABLED)) {
10609 /* IRQ is disabled */
10610 goto none;
10613 inta = ipw_read32(priv, IPW_INTA_RW);
10614 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10616 if (inta == 0xFFFFFFFF) {
10617 /* Hardware disappeared */
10618 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10619 goto none;
10622 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10623 /* Shared interrupt */
10624 goto none;
10627 /* tell the device to stop sending interrupts */
10628 __ipw_disable_interrupts(priv);
10630 /* ack current interrupts */
10631 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10632 ipw_write32(priv, IPW_INTA_RW, inta);
10634 /* Cache INTA value for our tasklet */
10635 priv->isr_inta = inta;
10637 tasklet_schedule(&priv->irq_tasklet);
10639 spin_unlock(&priv->irq_lock);
10641 return IRQ_HANDLED;
10642 none:
10643 spin_unlock(&priv->irq_lock);
10644 return IRQ_NONE;
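/* Interrupt flow (descriptive): the hard IRQ handler above only reads and
 * acknowledges INTA, masks further interrupts, caches the bits in
 * priv->isr_inta and schedules priv->irq_tasklet; the tasklet is expected to
 * do the real work and re-enable interrupts when it finishes.  Returning
 * IRQ_NONE on the "none:" paths lets the kernel try the other handlers
 * registered on this shared IRQ line. */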
10647 static void ipw_rf_kill(void *adapter)
10649 struct ipw_priv *priv = adapter;
10650 unsigned long flags;
10652 spin_lock_irqsave(&priv->lock, flags);
10654 if (rf_kill_active(priv)) {
10655 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10656 if (priv->workqueue)
10657 queue_delayed_work(priv->workqueue,
10658 &priv->rf_kill, 2 * HZ);
10659 goto exit_unlock;
10662 /* RF Kill is now disabled, so bring the device back up */
10664 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10665 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10666 "device\n");
10668 /* we cannot do an adapter restart while inside an irq lock */
10669 queue_work(priv->workqueue, &priv->adapter_restart);
10670 } else
10671 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10672 "enabled\n");
10674 exit_unlock:
10675 spin_unlock_irqrestore(&priv->lock, flags);
10678 static void ipw_bg_rf_kill(struct work_struct *work)
10680 struct ipw_priv *priv =
10681 container_of(work, struct ipw_priv, rf_kill.work);
10682 mutex_lock(&priv->mutex);
10683 ipw_rf_kill(priv);
10684 mutex_unlock(&priv->mutex);
10687 static void ipw_link_up(struct ipw_priv *priv)
10689 priv->last_seq_num = -1;
10690 priv->last_frag_num = -1;
10691 priv->last_packet_time = 0;
10693 netif_carrier_on(priv->net_dev);
10694 if (netif_queue_stopped(priv->net_dev)) {
10695 IPW_DEBUG_NOTIF("waking queue\n");
10696 netif_wake_queue(priv->net_dev);
10697 } else {
10698 IPW_DEBUG_NOTIF("starting queue\n");
10699 netif_start_queue(priv->net_dev);
10702 cancel_delayed_work(&priv->request_scan);
10703 cancel_delayed_work(&priv->scan_event);
10704 ipw_reset_stats(priv);
10705 /* Ensure the rate is updated immediately */
10706 priv->last_rate = ipw_get_current_rate(priv);
10707 ipw_gather_stats(priv);
10708 ipw_led_link_up(priv);
10709 notify_wx_assoc_event(priv);
10711 if (priv->config & CFG_BACKGROUND_SCAN)
10712 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10715 static void ipw_bg_link_up(struct work_struct *work)
10717 struct ipw_priv *priv =
10718 container_of(work, struct ipw_priv, link_up);
10719 mutex_lock(&priv->mutex);
10720 ipw_link_up(priv);
10721 mutex_unlock(&priv->mutex);
10724 static void ipw_link_down(struct ipw_priv *priv)
10726 ipw_led_link_down(priv);
10727 netif_carrier_off(priv->net_dev);
10728 netif_stop_queue(priv->net_dev);
10729 notify_wx_assoc_event(priv);
10731 /* Cancel any queued work ... */
10732 cancel_delayed_work(&priv->request_scan);
10733 cancel_delayed_work(&priv->adhoc_check);
10734 cancel_delayed_work(&priv->gather_stats);
10736 ipw_reset_stats(priv);
10738 if (!(priv->status & STATUS_EXIT_PENDING)) {
10739 /* Queue up another scan... */
10740 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10741 } else
10742 cancel_delayed_work(&priv->scan_event);
10745 static void ipw_bg_link_down(struct work_struct *work)
10747 struct ipw_priv *priv =
10748 container_of(work, struct ipw_priv, link_down);
10749 mutex_lock(&priv->mutex);
10750 ipw_link_down(priv);
10751 mutex_unlock(&priv->mutex);
10754 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10756 int ret = 0;
10758 priv->workqueue = create_workqueue(DRV_NAME);
10759 init_waitqueue_head(&priv->wait_command_queue);
10760 init_waitqueue_head(&priv->wait_state);
10762 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10763 INIT_WORK(&priv->associate, ipw_bg_associate);
10764 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10765 INIT_WORK(&priv->system_config, ipw_system_config);
10766 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10767 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10768 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10769 INIT_WORK(&priv->up, ipw_bg_up);
10770 INIT_WORK(&priv->down, ipw_bg_down);
10771 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10772 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10773 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10774 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10775 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10776 INIT_WORK(&priv->roam, ipw_bg_roam);
10777 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10778 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10779 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10780 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10781 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10782 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10783 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10785 #ifdef CONFIG_IPW2200_QOS
10786 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10787 #endif /* CONFIG_IPW2200_QOS */
10789 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10790 ipw_irq_tasklet, (unsigned long)priv);
10792 return ret;
10795 static void shim__set_security(struct net_device *dev,
10796 struct ieee80211_security *sec)
10798 struct ipw_priv *priv = ieee80211_priv(dev);
10799 int i;
10800 for (i = 0; i < 4; i++) {
10801 if (sec->flags & (1 << i)) {
10802 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10803 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10804 if (sec->key_sizes[i] == 0)
10805 priv->ieee->sec.flags &= ~(1 << i);
10806 else {
10807 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10808 sec->key_sizes[i]);
10809 priv->ieee->sec.flags |= (1 << i);
10811 priv->status |= STATUS_SECURITY_UPDATED;
10812 } else if (sec->level != SEC_LEVEL_1)
10813 priv->ieee->sec.flags &= ~(1 << i);
10816 if (sec->flags & SEC_ACTIVE_KEY) {
10817 if (sec->active_key <= 3) {
10818 priv->ieee->sec.active_key = sec->active_key;
10819 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10820 } else
10821 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10822 priv->status |= STATUS_SECURITY_UPDATED;
10823 } else
10824 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10826 if ((sec->flags & SEC_AUTH_MODE) &&
10827 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10828 priv->ieee->sec.auth_mode = sec->auth_mode;
10829 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10830 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10831 priv->capability |= CAP_SHARED_KEY;
10832 else
10833 priv->capability &= ~CAP_SHARED_KEY;
10834 priv->status |= STATUS_SECURITY_UPDATED;
10837 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10838 priv->ieee->sec.flags |= SEC_ENABLED;
10839 priv->ieee->sec.enabled = sec->enabled;
10840 priv->status |= STATUS_SECURITY_UPDATED;
10841 if (sec->enabled)
10842 priv->capability |= CAP_PRIVACY_ON;
10843 else
10844 priv->capability &= ~CAP_PRIVACY_ON;
10847 if (sec->flags & SEC_ENCRYPT)
10848 priv->ieee->sec.encrypt = sec->encrypt;
10850 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10851 priv->ieee->sec.level = sec->level;
10852 priv->ieee->sec.flags |= SEC_LEVEL;
10853 priv->status |= STATUS_SECURITY_UPDATED;
10856 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10857 ipw_set_hwcrypto_keys(priv);
10859 /* To match the current functionality of ipw2100 (which works well w/
10860 * various supplicants), we don't force a disassociate if the
10861 * privacy capability changes ... */
10862 #if 0
10863 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10864 (((priv->assoc_request.capability &
10865 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10866 (!(priv->assoc_request.capability &
10867 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10868 IPW_DEBUG_ASSOC("Disassociating due to capability "
10869 "change.\n");
10870 ipw_disassociate(priv);
10872 #endif
10875 static int init_supported_rates(struct ipw_priv *priv,
10876 struct ipw_supported_rates *rates)
10878 /* TODO: Mask out rates based on priv->rates_mask */
10880 memset(rates, 0, sizeof(*rates));
10881 /* configure supported rates */
10882 switch (priv->ieee->freq_band) {
10883 case IEEE80211_52GHZ_BAND:
10884 rates->ieee_mode = IPW_A_MODE;
10885 rates->purpose = IPW_RATE_CAPABILITIES;
10886 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10887 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10888 break;
10890 default: /* Mixed or 2.4 GHz */
10891 rates->ieee_mode = IPW_G_MODE;
10892 rates->purpose = IPW_RATE_CAPABILITIES;
10893 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10894 IEEE80211_CCK_DEFAULT_RATES_MASK);
10895 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10896 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10897 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10899 break;
10902 return 0;
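/* Descriptive note: a 5.2 GHz-only configuration selects IPW_A_MODE with the
 * OFDM default rate set; anything else (mixed or 2.4 GHz) selects IPW_G_MODE
 * with the CCK defaults, adding the OFDM defaults only when OFDM modulation
 * is enabled.  Per the TODO above, priv->rates_mask is not applied here yet. */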
10905 static int ipw_config(struct ipw_priv *priv)
10907 /* This is only called from ipw_up, which resets/reloads the firmware,
10908 so we don't need to first disable the card before we configure
10909 it */
10910 if (ipw_set_tx_power(priv))
10911 goto error;
10913 /* initialize adapter address */
10914 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10915 goto error;
10917 /* set basic system config settings */
10918 init_sys_config(&priv->sys_config);
10920 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10921 * Does not support BT priority yet (don't abort or defer our Tx) */
10922 if (bt_coexist) {
10923 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10925 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10926 priv->sys_config.bt_coexistence
10927 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10928 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10929 priv->sys_config.bt_coexistence
10930 |= CFG_BT_COEXISTENCE_OOB;
10933 #ifdef CONFIG_IPW2200_PROMISCUOUS
10934 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10935 priv->sys_config.accept_all_data_frames = 1;
10936 priv->sys_config.accept_non_directed_frames = 1;
10937 priv->sys_config.accept_all_mgmt_bcpr = 1;
10938 priv->sys_config.accept_all_mgmt_frames = 1;
10940 #endif
10942 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10943 priv->sys_config.answer_broadcast_ssid_probe = 1;
10944 else
10945 priv->sys_config.answer_broadcast_ssid_probe = 0;
10947 if (ipw_send_system_config(priv))
10948 goto error;
10950 init_supported_rates(priv, &priv->rates);
10951 if (ipw_send_supported_rates(priv, &priv->rates))
10952 goto error;
10954 /* Set request-to-send threshold */
10955 if (priv->rts_threshold) {
10956 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10957 goto error;
10959 #ifdef CONFIG_IPW2200_QOS
10960 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10961 ipw_qos_activate(priv, NULL);
10962 #endif /* CONFIG_IPW2200_QOS */
10964 if (ipw_set_random_seed(priv))
10965 goto error;
10967 /* final state transition to the RUN state */
10968 if (ipw_send_host_complete(priv))
10969 goto error;
10971 priv->status |= STATUS_INIT;
10973 ipw_led_init(priv);
10974 ipw_led_radio_on(priv);
10975 priv->notif_missed_beacons = 0;
10977 /* Set hardware WEP key if it is configured. */
10978 if ((priv->capability & CAP_PRIVACY_ON) &&
10979 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10980 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10981 ipw_set_hwcrypto_keys(priv);
10983 return 0;
10985 error:
10986 return -EIO;
10990 * NOTE:
10992 * These tables have been tested in conjunction with the
10993 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10995 * Altering these values, using them on other hardware, or using them in
10996 * geographies not intended for resale of the above-mentioned Intel
10997 * adapters has not been tested.
10999 * Remember to update the table in README.ipw2200 when changing this
11000 * table.
11003 static const struct ieee80211_geo ipw_geos[] = {
11004 { /* Restricted */
11005 "---",
11006 .bg_channels = 11,
11007 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11008 {2427, 4}, {2432, 5}, {2437, 6},
11009 {2442, 7}, {2447, 8}, {2452, 9},
11010 {2457, 10}, {2462, 11}},
11013 { /* Custom US/Canada */
11014 "ZZF",
11015 .bg_channels = 11,
11016 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11017 {2427, 4}, {2432, 5}, {2437, 6},
11018 {2442, 7}, {2447, 8}, {2452, 9},
11019 {2457, 10}, {2462, 11}},
11020 .a_channels = 8,
11021 .a = {{5180, 36},
11022 {5200, 40},
11023 {5220, 44},
11024 {5240, 48},
11025 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11026 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11027 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11028 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
11031 { /* Rest of World */
11032 "ZZD",
11033 .bg_channels = 13,
11034 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11035 {2427, 4}, {2432, 5}, {2437, 6},
11036 {2442, 7}, {2447, 8}, {2452, 9},
11037 {2457, 10}, {2462, 11}, {2467, 12},
11038 {2472, 13}},
11041 { /* Custom USA & Europe & High */
11042 "ZZA",
11043 .bg_channels = 11,
11044 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11045 {2427, 4}, {2432, 5}, {2437, 6},
11046 {2442, 7}, {2447, 8}, {2452, 9},
11047 {2457, 10}, {2462, 11}},
11048 .a_channels = 13,
11049 .a = {{5180, 36},
11050 {5200, 40},
11051 {5220, 44},
11052 {5240, 48},
11053 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11054 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11055 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11056 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11057 {5745, 149},
11058 {5765, 153},
11059 {5785, 157},
11060 {5805, 161},
11061 {5825, 165}},
11064 { /* Custom NA & Europe */
11065 "ZZB",
11066 .bg_channels = 11,
11067 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11068 {2427, 4}, {2432, 5}, {2437, 6},
11069 {2442, 7}, {2447, 8}, {2452, 9},
11070 {2457, 10}, {2462, 11}},
11071 .a_channels = 13,
11072 .a = {{5180, 36},
11073 {5200, 40},
11074 {5220, 44},
11075 {5240, 48},
11076 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11077 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11078 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11079 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11080 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11081 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11082 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11083 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11084 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11087 { /* Custom Japan */
11088 "ZZC",
11089 .bg_channels = 11,
11090 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11091 {2427, 4}, {2432, 5}, {2437, 6},
11092 {2442, 7}, {2447, 8}, {2452, 9},
11093 {2457, 10}, {2462, 11}},
11094 .a_channels = 4,
11095 .a = {{5170, 34}, {5190, 38},
11096 {5210, 42}, {5230, 46}},
11099 { /* Custom */
11100 "ZZM",
11101 .bg_channels = 11,
11102 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11103 {2427, 4}, {2432, 5}, {2437, 6},
11104 {2442, 7}, {2447, 8}, {2452, 9},
11105 {2457, 10}, {2462, 11}},
11108 { /* Europe */
11109 "ZZE",
11110 .bg_channels = 13,
11111 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11112 {2427, 4}, {2432, 5}, {2437, 6},
11113 {2442, 7}, {2447, 8}, {2452, 9},
11114 {2457, 10}, {2462, 11}, {2467, 12},
11115 {2472, 13}},
11116 .a_channels = 19,
11117 .a = {{5180, 36},
11118 {5200, 40},
11119 {5220, 44},
11120 {5240, 48},
11121 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11122 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11123 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11124 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11125 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11126 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11127 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11128 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11129 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11130 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11131 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11132 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11133 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11134 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11135 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11138 { /* Custom Japan */
11139 "ZZJ",
11140 .bg_channels = 14,
11141 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11142 {2427, 4}, {2432, 5}, {2437, 6},
11143 {2442, 7}, {2447, 8}, {2452, 9},
11144 {2457, 10}, {2462, 11}, {2467, 12},
11145 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11146 .a_channels = 4,
11147 .a = {{5170, 34}, {5190, 38},
11148 {5210, 42}, {5230, 46}},
11151 { /* Rest of World */
11152 "ZZR",
11153 .bg_channels = 14,
11154 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11155 {2427, 4}, {2432, 5}, {2437, 6},
11156 {2442, 7}, {2447, 8}, {2452, 9},
11157 {2457, 10}, {2462, 11}, {2467, 12},
11158 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11159 IEEE80211_CH_PASSIVE_ONLY}},
11162 { /* High Band */
11163 "ZZH",
11164 .bg_channels = 13,
11165 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11166 {2427, 4}, {2432, 5}, {2437, 6},
11167 {2442, 7}, {2447, 8}, {2452, 9},
11168 {2457, 10}, {2462, 11},
11169 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11170 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11171 .a_channels = 4,
11172 .a = {{5745, 149}, {5765, 153},
11173 {5785, 157}, {5805, 161}},
11176 { /* Custom Europe */
11177 "ZZG",
11178 .bg_channels = 13,
11179 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11180 {2427, 4}, {2432, 5}, {2437, 6},
11181 {2442, 7}, {2447, 8}, {2452, 9},
11182 {2457, 10}, {2462, 11},
11183 {2467, 12}, {2472, 13}},
11184 .a_channels = 4,
11185 .a = {{5180, 36}, {5200, 40},
11186 {5220, 44}, {5240, 48}},
11189 { /* Europe */
11190 "ZZK",
11191 .bg_channels = 13,
11192 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11193 {2427, 4}, {2432, 5}, {2437, 6},
11194 {2442, 7}, {2447, 8}, {2452, 9},
11195 {2457, 10}, {2462, 11},
11196 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11197 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11198 .a_channels = 24,
11199 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11200 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11201 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11202 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11203 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11204 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11205 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11206 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11207 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11208 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11209 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11210 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11211 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11212 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11213 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11214 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11215 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11216 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11217 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11218 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11219 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11220 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11221 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11222 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11225 { /* Europe */
11226 "ZZL",
11227 .bg_channels = 11,
11228 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11229 {2427, 4}, {2432, 5}, {2437, 6},
11230 {2442, 7}, {2447, 8}, {2452, 9},
11231 {2457, 10}, {2462, 11}},
11232 .a_channels = 13,
11233 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11234 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11235 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11236 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11237 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11238 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11239 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11240 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11241 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11242 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11243 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11244 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11245 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
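/* Selection example (descriptive): ipw_up() below matches the three-byte
 * country code at EEPROM_COUNTRY_CODE against the .name fields in this
 * table, so an EEPROM programmed with "ZZE" picks the Europe entry with 13
 * bg channels and 19 passive-scan 5 GHz channels.  An unrecognized code logs
 * a warning and falls back to index 0, the restricted "---" entry with 11 bg
 * channels and no 802.11a channels. */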
11249 #define MAX_HW_RESTARTS 5
11250 static int ipw_up(struct ipw_priv *priv)
11252 int rc, i, j;
11254 if (priv->status & STATUS_EXIT_PENDING)
11255 return -EIO;
11257 if (cmdlog && !priv->cmdlog) {
11258 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11259 GFP_KERNEL);
11260 if (priv->cmdlog == NULL) {
11261 IPW_ERROR("Error allocating %d command log entries.\n",
11262 cmdlog);
11263 return -ENOMEM;
11264 } else {
11265 priv->cmdlog_len = cmdlog;
11269 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11270 /* Load the microcode, firmware, and eeprom.
11271 * Also start the clocks. */
11272 rc = ipw_load(priv);
11273 if (rc) {
11274 IPW_ERROR("Unable to load firmware: %d\n", rc);
11275 return rc;
11278 ipw_init_ordinals(priv);
11279 if (!(priv->config & CFG_CUSTOM_MAC))
11280 eeprom_parse_mac(priv, priv->mac_addr);
11281 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11283 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11284 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11285 ipw_geos[j].name, 3))
11286 break;
11288 if (j == ARRAY_SIZE(ipw_geos)) {
11289 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11290 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11291 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11292 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11293 j = 0;
11295 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11296 IPW_WARNING("Could not set geography.\n");
11297 return 0;
11300 if (priv->status & STATUS_RF_KILL_SW) {
11301 IPW_WARNING("Radio disabled by module parameter.\n");
11302 return 0;
11303 } else if (rf_kill_active(priv)) {
11304 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11305 "Kill switch must be turned off for "
11306 "wireless networking to work.\n");
11307 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11308 2 * HZ);
11309 return 0;
11312 rc = ipw_config(priv);
11313 if (!rc) {
11314 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11316 /* If configured to try and auto-associate, kick
11317 * off a scan. */
11318 queue_delayed_work(priv->workqueue,
11319 &priv->request_scan, 0);
11321 return 0;
11324 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11325 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11326 i, MAX_HW_RESTARTS);
11328 /* We had an error bringing up the hardware, so take it
11329 * all the way back down so we can try again */
11330 ipw_down(priv);
11333 /* tried to restart and config the device for as long as our
11334 * patience could withstand */
11335 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11337 return -EIO;
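/* Descriptive note: the loop above gives the hardware MAX_HW_RESTARTS (5)
 * attempts to load firmware and accept its configuration, tearing the device
 * all the way down with ipw_down() between tries.  RF kill (switch or module
 * parameter) is treated as a successful bring-up for now; the rf_kill poll
 * work or a later user action finishes the job once the radio is enabled. */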
11340 static void ipw_bg_up(struct work_struct *work)
11342 struct ipw_priv *priv =
11343 container_of(work, struct ipw_priv, up);
11344 mutex_lock(&priv->mutex);
11345 ipw_up(priv);
11346 mutex_unlock(&priv->mutex);
11349 static void ipw_deinit(struct ipw_priv *priv)
11351 int i;
11353 if (priv->status & STATUS_SCANNING) {
11354 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11355 ipw_abort_scan(priv);
11358 if (priv->status & STATUS_ASSOCIATED) {
11359 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11360 ipw_disassociate(priv);
11363 ipw_led_shutdown(priv);
11365 /* Wait up to 1s for status to change to not scanning and not
11366 * associated (disassociation can take a while for a full 802.11
11367 * exchange) */
11368 for (i = 1000; i && (priv->status &
11369 (STATUS_DISASSOCIATING |
11370 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11371 udelay(10);
11373 if (priv->status & (STATUS_DISASSOCIATING |
11374 STATUS_ASSOCIATED | STATUS_SCANNING))
11375 IPW_DEBUG_INFO("Still associated or scanning...\n");
11376 else
11377 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11379 /* Attempt to disable the card */
11380 ipw_send_card_disable(priv, 0);
11382 priv->status &= ~STATUS_INIT;
11385 static void ipw_down(struct ipw_priv *priv)
11387 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11389 priv->status |= STATUS_EXIT_PENDING;
11391 if (ipw_is_init(priv))
11392 ipw_deinit(priv);
11394 /* Wipe out the EXIT_PENDING status bit if we are not actually
11395 * exiting the module */
11396 if (!exit_pending)
11397 priv->status &= ~STATUS_EXIT_PENDING;
11399 /* tell the device to stop sending interrupts */
11400 ipw_disable_interrupts(priv);
11402 /* Clear all bits but the RF Kill */
11403 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11404 netif_carrier_off(priv->net_dev);
11405 netif_stop_queue(priv->net_dev);
11407 ipw_stop_nic(priv);
11409 ipw_led_radio_off(priv);
11412 static void ipw_bg_down(struct work_struct *work)
11414 struct ipw_priv *priv =
11415 container_of(work, struct ipw_priv, down);
11416 mutex_lock(&priv->mutex);
11417 ipw_down(priv);
11418 mutex_unlock(&priv->mutex);
11421 /* Called by register_netdev() */
11422 static int ipw_net_init(struct net_device *dev)
11424 struct ipw_priv *priv = ieee80211_priv(dev);
11425 mutex_lock(&priv->mutex);
11427 if (ipw_up(priv)) {
11428 mutex_unlock(&priv->mutex);
11429 return -EIO;
11432 mutex_unlock(&priv->mutex);
11433 return 0;
11436 /* PCI driver stuff */
11437 static struct pci_device_id card_ids[] = {
11438 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11439 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11440 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11441 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11442 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11443 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11444 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11445 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11446 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11447 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11448 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11449 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11450 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11451 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11452 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11453 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11454 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11455 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11456 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11457 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11458 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11459 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11461 /* required last entry */
11462 {0,}
11465 MODULE_DEVICE_TABLE(pci, card_ids);
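/* Matching sketch (illustrative): a 2200BG that lspci -nn reports as
 * [8086:4220] matches the {0x4220, PCI_ANY_ID, PCI_ANY_ID} entry regardless
 * of its subsystem IDs, while the 0x1043 entries only match the specific
 * subvendor/subdevice pairs listed.  MODULE_DEVICE_TABLE exports this list
 * so udev/modprobe can autoload the driver when such a device is found. */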
11467 static struct attribute *ipw_sysfs_entries[] = {
11468 &dev_attr_rf_kill.attr,
11469 &dev_attr_direct_dword.attr,
11470 &dev_attr_indirect_byte.attr,
11471 &dev_attr_indirect_dword.attr,
11472 &dev_attr_mem_gpio_reg.attr,
11473 &dev_attr_command_event_reg.attr,
11474 &dev_attr_nic_type.attr,
11475 &dev_attr_status.attr,
11476 &dev_attr_cfg.attr,
11477 &dev_attr_error.attr,
11478 &dev_attr_event_log.attr,
11479 &dev_attr_cmd_log.attr,
11480 &dev_attr_eeprom_delay.attr,
11481 &dev_attr_ucode_version.attr,
11482 &dev_attr_rtc.attr,
11483 &dev_attr_scan_age.attr,
11484 &dev_attr_led.attr,
11485 &dev_attr_speed_scan.attr,
11486 &dev_attr_net_stats.attr,
11487 &dev_attr_channels.attr,
11488 #ifdef CONFIG_IPW2200_PROMISCUOUS
11489 &dev_attr_rtap_iface.attr,
11490 &dev_attr_rtap_filter.attr,
11491 #endif
11492 NULL
11495 static struct attribute_group ipw_attribute_group = {
11496 .name = NULL, /* put in device directory */
11497 .attrs = ipw_sysfs_entries,
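/* Usage sketch (illustrative; the PCI address below is an assumed example).
 * ipw_pci_probe() attaches this group to the PCI device's kobject, so the
 * attributes appear as files such as
 *   /sys/bus/pci/devices/0000:02:03.0/rf_kill
 *   /sys/bus/pci/devices/0000:02:03.0/scan_age
 * readable with cat and, where the attribute allows it, writable with echo. */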
11500 #ifdef CONFIG_IPW2200_PROMISCUOUS
11501 static int ipw_prom_open(struct net_device *dev)
11503 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11504 struct ipw_priv *priv = prom_priv->priv;
11506 IPW_DEBUG_INFO("prom dev->open\n");
11507 netif_carrier_off(dev);
11508 netif_stop_queue(dev);
11510 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11511 priv->sys_config.accept_all_data_frames = 1;
11512 priv->sys_config.accept_non_directed_frames = 1;
11513 priv->sys_config.accept_all_mgmt_bcpr = 1;
11514 priv->sys_config.accept_all_mgmt_frames = 1;
11516 ipw_send_system_config(priv);
11519 return 0;
11522 static int ipw_prom_stop(struct net_device *dev)
11524 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11525 struct ipw_priv *priv = prom_priv->priv;
11527 IPW_DEBUG_INFO("prom dev->stop\n");
11529 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11530 priv->sys_config.accept_all_data_frames = 0;
11531 priv->sys_config.accept_non_directed_frames = 0;
11532 priv->sys_config.accept_all_mgmt_bcpr = 0;
11533 priv->sys_config.accept_all_mgmt_frames = 0;
11535 ipw_send_system_config(priv);
11538 return 0;
11541 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11543 IPW_DEBUG_INFO("prom dev->xmit\n");
11544 netif_stop_queue(dev);
11545 return -EOPNOTSUPP;
11548 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11550 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11551 return &prom_priv->ieee->stats;
11554 static int ipw_prom_alloc(struct ipw_priv *priv)
11556 int rc = 0;
11558 if (priv->prom_net_dev)
11559 return -EPERM;
11561 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11562 if (priv->prom_net_dev == NULL)
11563 return -ENOMEM;
11565 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11566 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11567 priv->prom_priv->priv = priv;
11569 strcpy(priv->prom_net_dev->name, "rtap%d");
11571 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11572 priv->prom_net_dev->open = ipw_prom_open;
11573 priv->prom_net_dev->stop = ipw_prom_stop;
11574 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11575 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11577 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11579 rc = register_netdev(priv->prom_net_dev);
11580 if (rc) {
11581 free_ieee80211(priv->prom_net_dev);
11582 priv->prom_net_dev = NULL;
11583 return rc;
11586 return 0;
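/* Usage sketch (illustrative): with the rtap_iface module parameter set, the
 * "rtap%d" device registered above appears as e.g. rtap0.  Bringing it up
 * (ip link set rtap0 up) triggers ipw_prom_open(), which asks the firmware
 * to accept all frames; a radiotap-aware sniffer can then capture on rtap0
 * while the regular interface keeps operating normally. */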
11589 static void ipw_prom_free(struct ipw_priv *priv)
11591 if (!priv->prom_net_dev)
11592 return;
11594 unregister_netdev(priv->prom_net_dev);
11595 free_ieee80211(priv->prom_net_dev);
11597 priv->prom_net_dev = NULL;
11600 #endif
11603 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11605 int err = 0;
11606 struct net_device *net_dev;
11607 void __iomem *base;
11608 u32 length, val;
11609 struct ipw_priv *priv;
11610 int i;
11612 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11613 if (net_dev == NULL) {
11614 err = -ENOMEM;
11615 goto out;
11618 priv = ieee80211_priv(net_dev);
11619 priv->ieee = netdev_priv(net_dev);
11621 priv->net_dev = net_dev;
11622 priv->pci_dev = pdev;
11623 ipw_debug_level = debug;
11624 spin_lock_init(&priv->irq_lock);
11625 spin_lock_init(&priv->lock);
11626 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11627 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11629 mutex_init(&priv->mutex);
11630 if (pci_enable_device(pdev)) {
11631 err = -ENODEV;
11632 goto out_free_ieee80211;
11635 pci_set_master(pdev);
11637 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11638 if (!err)
11639 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11640 if (err) {
11641 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11642 goto out_pci_disable_device;
11645 pci_set_drvdata(pdev, priv);
11647 err = pci_request_regions(pdev, DRV_NAME);
11648 if (err)
11649 goto out_pci_disable_device;
11651 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11652 * PCI Tx retries from interfering with C3 CPU state */
11653 pci_read_config_dword(pdev, 0x40, &val);
11654 if ((val & 0x0000ff00) != 0)
11655 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11657 length = pci_resource_len(pdev, 0);
11658 priv->hw_len = length;
11660 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11661 if (!base) {
11662 err = -ENODEV;
11663 goto out_pci_release_regions;
11666 priv->hw_base = base;
11667 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11668 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11670 err = ipw_setup_deferred_work(priv);
11671 if (err) {
11672 IPW_ERROR("Unable to setup deferred work\n");
11673 goto out_iounmap;
11676 ipw_sw_reset(priv, 1);
11678 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11679 if (err) {
11680 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11681 goto out_destroy_workqueue;
11684 SET_NETDEV_DEV(net_dev, &pdev->dev);
11686 mutex_lock(&priv->mutex);
11688 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11689 priv->ieee->set_security = shim__set_security;
11690 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11692 #ifdef CONFIG_IPW2200_QOS
11693 priv->ieee->is_qos_active = ipw_is_qos_active;
11694 priv->ieee->handle_probe_response = ipw_handle_beacon;
11695 priv->ieee->handle_beacon = ipw_handle_probe_response;
11696 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11697 #endif /* CONFIG_IPW2200_QOS */
11699 priv->ieee->perfect_rssi = -20;
11700 priv->ieee->worst_rssi = -85;
	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;
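
	/* Error unwinding: each label below undoes the setup completed before
	 * the corresponding failure point, in reverse order of acquisition. */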
      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}

static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue, so we can safely remove the queued items now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
	free_firmware();
}

#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif

static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
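
/*
 * Illustrative usage (not part of the original source): the parameters above
 * are read at module load time, so an invocation might look like
 *
 *     modprobe ipw2200 mode=2 rtap_iface=1 debug=0x43fff
 *
 * where mode=2 requests monitor mode (CONFIG_IPW2200_MONITOR builds only)
 * and rtap_iface=1 creates the radiotap capture interface
 * (CONFIG_IPW2200_PROMISCUOUS builds only). The debug mask shown is only an
 * example value; the individual IPW_DL_* bits are defined in ipw2200.h.
 */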

module_exit(ipw_exit);
module_init(ipw_init);