1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
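/* The version string is the base "1.2.2" with one letter appended per
 * enabled build option above: 'k' for an in-kernel (non-external) build,
 * 'd' debug, 'm' monitor, 'p' promiscuous, 'r' radiotap, 'q' QoS.  For
 * example, an in-tree build with debug and QoS enabled would
 * (illustratively) report "1.2.2kdq". */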
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99 'a', 'b', 'g', '?'
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
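/* Note: this maps 802.1d priorities 0..7 onto the four hardware TX queues:
 * 0/3 -> queue 1, 1/2 -> queue 2, 4/5 -> queue 3, 6/7 -> queue 4, which is
 * consistent with the usual best-effort/background/video/voice
 * access-category grouping. */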
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
184 static void ipw_tx_queue_free(struct ipw_priv *);
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
202 int out, i, j, l;
203 char c;
205 out = snprintf(buf, count, "%08X", ofs);
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
224 out += snprintf(buf + out, count - out, "%c", c);
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
231 return out;
234 static void printk_buf(int level, const u8 * data, u32 len)
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
266 return total;
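/* Illustrative note: each line produced by snprint_line() covers up to 16
 * bytes and has the form
 *   <offset as %08X>  <8 hex bytes>  <8 hex bytes>   <ASCII, '.' for any
 *   non-printable byte>
 * printk_buf() emits one such line per 16 bytes at KERN_DEBUG, while
 * snprintk_buf() concatenates the lines into a caller-supplied buffer
 * (used by the cmd_log sysfs handler further down). */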
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308 #define ipw_write8(ipw, ofs, val) \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val)
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
374 _ipw_read_indirect(a, b, c, d);
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
430 u32 value;
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
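/* Indirect access works by latching the (dword-aligned) SRAM address into
 * IPW_INDIRECT_ADDR and then transferring through IPW_INDIRECT_DATA; the
 * low address bits select the byte or half-word within that dword.  As an
 * illustrative sketch only (not part of the driver), a 16-bit indirect
 * read would follow the same pattern as _ipw_read_reg8()/_ipw_read_reg32()
 * above: */
#if 0
static u16 _ipw_read_reg16(struct ipw_priv *priv, u32 reg)
{
	u32 word;

	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
	return (word >> ((reg & 0x2) * 8)) & 0xffff;
}
#endif				/* 0 */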
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
451 if (num <= 0) {
452 return;
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
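/* The unaligned transfer above proceeds in three phases: any leading bytes
 * up to the next dword boundary are moved one at a time through
 * IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA, the aligned middle portion is moved
 * a dword at a time through the auto-incrementing IPW_AUTOINC_ADDR/
 * IPW_AUTOINC_DATA pair, and any trailing bytes are again moved one at a
 * time.  _ipw_write_indirect() below mirrors the same structure for
 * writes. */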
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
488 if (num <= 0) {
489 return;
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
519 memcpy_toio((priv->hw_base + addr), buf, num);
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
552 unsigned long flags;
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
561 unsigned long flags;
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 static char *ipw_error_desc(u32 val)
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
609 u32 i;
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
635 static inline int ipw_is_init(struct ipw_priv *priv)
637 return (priv->status & STATUS_INIT) ? 1 : 0;
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
642 u32 addr, field_info, field_len, field_count, total_len;
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
660 * TABLE 0: Direct access to a table of 32 bit values
662 * This is a very simple table with the data directly
663 * read from the table
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer than "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
691 case IPW_ORD_TABLE_1_MASK:
693 * TABLE 1: Indirect access to a table of 32 bit values
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
721 case IPW_ORD_TABLE_2_MASK:
723 * TABLE 2: Indirect access to a table of variable sized values
725 * This table consists of six values, each containing
726 * - dword containing the starting offset of the data
727 * - dword containing the length in the first 16 bits
728 * and the count in the second 16 bits
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
743 /* get the second DW of statistics ;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
756 /* abort if not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
763 *len = total_len;
764 if (!total_len)
765 return 0;
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
779 return 0;
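/* Illustrative usage sketch (not part of the driver): callers pass a
 * buffer and its size; on return *len is updated to the number of bytes
 * the ordinal actually needs, which is how the sysfs handlers such as
 * show_ucode_version() below use it. */
#if 0
{
	u32 len = sizeof(u32), ucode = 0;

	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &ucode, &len))
		; /* tables not initialized, bad ordinal, or buffer too small */
}
#endif				/* 0 */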
782 static void ipw_init_ordinals(struct ipw_priv *priv)
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
805 static u32 ipw_register_toggle(u32 reg)
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that require to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
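/* With the timings above, the unassociated "blink" is roughly 300 ms of
 * link LED on followed by 2.7 s off (ipw_led_link_on/ipw_led_link_off
 * reschedule each other while unassociated), and the activity LED is held
 * on for 250 ms after each burst of traffic. */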
830 static void ipw_led_link_on(struct ipw_priv *priv)
832 unsigned long flags;
833 u32 led;
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
840 spin_lock_irqsave(&priv->lock, flags);
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
848 led = ipw_register_toggle(led);
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
853 priv->status |= STATUS_LED_LINK_ON;
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
862 spin_unlock_irqrestore(&priv->lock, flags);
865 static void ipw_bg_led_link_on(struct work_struct *work)
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
874 static void ipw_led_link_off(struct ipw_priv *priv)
876 unsigned long flags;
877 u32 led;
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't toggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 return;
884 spin_lock_irqsave(&priv->lock, flags);
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
894 IPW_DEBUG_LED("Link LED Off\n");
896 priv->status &= ~STATUS_LED_LINK_ON;
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 LD_TIME_LINK_OFF);
907 spin_unlock_irqrestore(&priv->lock, flags);
910 static void ipw_bg_led_link_off(struct work_struct *work)
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
921 u32 led;
923 if (priv->config & CFG_NO_LED)
924 return;
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
933 led = ipw_register_toggle(led);
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
938 IPW_DEBUG_LED("Activity LED On\n");
940 priv->status |= STATUS_LED_ACT_ON;
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
961 #endif /* 0 */
963 static void ipw_led_activity_off(struct ipw_priv *priv)
965 unsigned long flags;
966 u32 led;
968 if (priv->config & CFG_NO_LED)
969 return;
971 spin_lock_irqsave(&priv->lock, flags);
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
977 led = ipw_register_toggle(led);
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
982 IPW_DEBUG_LED("Activity LED Off\n");
984 priv->status &= ~STATUS_LED_ACT_ON;
987 spin_unlock_irqrestore(&priv->lock, flags);
990 static void ipw_bg_led_activity_off(struct work_struct *work)
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
999 static void ipw_led_band_on(struct ipw_priv *priv)
1001 unsigned long flags;
1002 u32 led;
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 return;
1009 spin_lock_irqsave(&priv->lock, flags);
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 } else {
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1026 led = ipw_register_toggle(led);
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1031 spin_unlock_irqrestore(&priv->lock, flags);
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1036 unsigned long flags;
1037 u32 led;
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1043 spin_lock_irqsave(&priv->lock, flags);
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1049 led = ipw_register_toggle(led);
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1054 spin_unlock_irqrestore(&priv->lock, flags);
1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1059 ipw_led_link_on(priv);
1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1064 ipw_led_activity_off(priv);
1065 ipw_led_link_off(priv);
1068 static void ipw_led_link_up(struct ipw_priv *priv)
1070 /* Set the Link Led on for all nic types */
1071 ipw_led_link_on(priv);
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1083 static void ipw_led_init(struct ipw_priv *priv)
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controlling the debug level.
1149 * See the level definitions in ipw for details.
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1159 char *p = (char *)buf;
1160 u32 val;
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1175 return strnlen(buf, count);
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
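/* Illustrative usage (paths assumed, not taken from this file): with the
 * attribute registered against the driver, the level can typically be set
 * and read back via sysfs, e.g.
 *   echo 0x00000FFF > /sys/bus/pci/drivers/ipw2200/debug_level
 *   cat /sys/bus/pci/drivers/ipw2200/debug_level
 * store_debug_level() above accepts either hex ("0x..."/"x...") or decimal
 * input. */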
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1190 u32 base;
1192 if (log_len) {
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 u32 elem_len = ipw_read_reg32(priv, base);
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 if (!error) {
1210 IPW_ERROR("Memory allocation for firmware error log "
1211 "failed.\n");
1212 return NULL;
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1222 ipw_capture_event_log(priv, log_len, error->log);
1224 if (elem_len)
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
1228 return error;
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 u32 log_size;
1237 struct ipw_event *log;
1238 u32 len = 0, i;
1240 /* not using min() because of its strict type checking */
1241 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1242 sizeof(*log) * log_len : PAGE_SIZE;
1243 log = kzalloc(log_size, GFP_KERNEL);
1244 if (!log) {
1245 IPW_ERROR("Unable to allocate memory for log\n");
1246 return 0;
1248 log_len = log_size / sizeof(*log);
1249 ipw_capture_event_log(priv, log_len, log);
1251 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1252 for (i = 0; i < log_len; i++)
1253 len += snprintf(buf + len, PAGE_SIZE - len,
1254 "\n%08X%08X%08X",
1255 log[i].time, log[i].event, log[i].data);
1256 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1257 kfree(log);
1258 return len;
1261 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1263 static ssize_t show_error(struct device *d,
1264 struct device_attribute *attr, char *buf)
1266 struct ipw_priv *priv = dev_get_drvdata(d);
1267 u32 len = 0, i;
1268 if (!priv->error)
1269 return 0;
1270 len += snprintf(buf + len, PAGE_SIZE - len,
1271 "%08lX%08X%08X%08X",
1272 priv->error->jiffies,
1273 priv->error->status,
1274 priv->error->config, priv->error->elem_len);
1275 for (i = 0; i < priv->error->elem_len; i++)
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X%08X%08X%08X%08X%08X%08X",
1278 priv->error->elem[i].time,
1279 priv->error->elem[i].desc,
1280 priv->error->elem[i].blink1,
1281 priv->error->elem[i].blink2,
1282 priv->error->elem[i].link1,
1283 priv->error->elem[i].link2,
1284 priv->error->elem[i].data);
1286 len += snprintf(buf + len, PAGE_SIZE - len,
1287 "\n%08X", priv->error->log_len);
1288 for (i = 0; i < priv->error->log_len; i++)
1289 len += snprintf(buf + len, PAGE_SIZE - len,
1290 "\n%08X%08X%08X",
1291 priv->error->log[i].time,
1292 priv->error->log[i].event,
1293 priv->error->log[i].data);
1294 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1295 return len;
1298 static ssize_t clear_error(struct device *d,
1299 struct device_attribute *attr,
1300 const char *buf, size_t count)
1302 struct ipw_priv *priv = dev_get_drvdata(d);
1304 kfree(priv->error);
1305 priv->error = NULL;
1306 return count;
1309 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1311 static ssize_t show_cmd_log(struct device *d,
1312 struct device_attribute *attr, char *buf)
1314 struct ipw_priv *priv = dev_get_drvdata(d);
1315 u32 len = 0, i;
1316 if (!priv->cmdlog)
1317 return 0;
1318 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1319 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1320 i = (i + 1) % priv->cmdlog_len) {
1321 len +=
1322 snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1324 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1325 priv->cmdlog[i].cmd.len);
1326 len +=
1327 snprintk_buf(buf + len, PAGE_SIZE - len,
1328 (u8 *) priv->cmdlog[i].cmd.param,
1329 priv->cmdlog[i].cmd.len);
1330 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1332 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1333 return len;
1336 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1338 #ifdef CONFIG_IPW2200_PROMISCUOUS
1339 static void ipw_prom_free(struct ipw_priv *priv);
1340 static int ipw_prom_alloc(struct ipw_priv *priv);
1341 static ssize_t store_rtap_iface(struct device *d,
1342 struct device_attribute *attr,
1343 const char *buf, size_t count)
1345 struct ipw_priv *priv = dev_get_drvdata(d);
1346 int rc = 0;
1348 if (count < 1)
1349 return -EINVAL;
1351 switch (buf[0]) {
1352 case '0':
1353 if (!rtap_iface)
1354 return count;
1356 if (netif_running(priv->prom_net_dev)) {
1357 IPW_WARNING("Interface is up. Cannot unregister.\n");
1358 return count;
1361 ipw_prom_free(priv);
1362 rtap_iface = 0;
1363 break;
1365 case '1':
1366 if (rtap_iface)
1367 return count;
1369 rc = ipw_prom_alloc(priv);
1370 if (!rc)
1371 rtap_iface = 1;
1372 break;
1374 default:
1375 return -EINVAL;
1378 if (rc) {
1379 IPW_ERROR("Failed to register promiscuous network "
1380 "device (error %d).\n", rc);
1383 return count;
1386 static ssize_t show_rtap_iface(struct device *d,
1387 struct device_attribute *attr,
1388 char *buf)
1390 struct ipw_priv *priv = dev_get_drvdata(d);
1391 if (rtap_iface)
1392 return sprintf(buf, "%s", priv->prom_net_dev->name);
1393 else {
1394 buf[0] = '-';
1395 buf[1] = '1';
1396 buf[2] = '\0';
1397 return 3;
1401 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1402 store_rtap_iface);
1404 static ssize_t store_rtap_filter(struct device *d,
1405 struct device_attribute *attr,
1406 const char *buf, size_t count)
1408 struct ipw_priv *priv = dev_get_drvdata(d);
1410 if (!priv->prom_priv) {
1411 IPW_ERROR("Attempting to set filter without "
1412 "rtap_iface enabled.\n");
1413 return -EPERM;
1416 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1418 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1419 BIT_ARG16(priv->prom_priv->filter));
1421 return count;
1424 static ssize_t show_rtap_filter(struct device *d,
1425 struct device_attribute *attr,
1426 char *buf)
1428 struct ipw_priv *priv = dev_get_drvdata(d);
1429 return sprintf(buf, "0x%04X",
1430 priv->prom_priv ? priv->prom_priv->filter : 0);
1433 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1434 store_rtap_filter);
1435 #endif
1437 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1438 char *buf)
1440 struct ipw_priv *priv = dev_get_drvdata(d);
1441 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1444 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1445 const char *buf, size_t count)
1447 struct ipw_priv *priv = dev_get_drvdata(d);
1448 struct net_device *dev = priv->net_dev;
1449 char buffer[] = "00000000";
1450 unsigned long len =
1451 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1452 unsigned long val;
1453 char *p = buffer;
1455 IPW_DEBUG_INFO("enter\n");
1457 strncpy(buffer, buf, len);
1458 buffer[len] = 0;
1460 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1461 p++;
1462 if (p[0] == 'x' || p[0] == 'X')
1463 p++;
1464 val = simple_strtoul(p, &p, 16);
1465 } else
1466 val = simple_strtoul(p, &p, 10);
1467 if (p == buffer) {
1468 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1469 } else {
1470 priv->ieee->scan_age = val;
1471 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1474 IPW_DEBUG_INFO("exit\n");
1475 return len;
1478 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1480 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1481 char *buf)
1483 struct ipw_priv *priv = dev_get_drvdata(d);
1484 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1487 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1488 const char *buf, size_t count)
1490 struct ipw_priv *priv = dev_get_drvdata(d);
1492 IPW_DEBUG_INFO("enter\n");
1494 if (count == 0)
1495 return 0;
1497 if (*buf == 0) {
1498 IPW_DEBUG_LED("Disabling LED control.\n");
1499 priv->config |= CFG_NO_LED;
1500 ipw_led_shutdown(priv);
1501 } else {
1502 IPW_DEBUG_LED("Enabling LED control.\n");
1503 priv->config &= ~CFG_NO_LED;
1504 ipw_led_init(priv);
1507 IPW_DEBUG_INFO("exit\n");
1508 return count;
1511 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1513 static ssize_t show_status(struct device *d,
1514 struct device_attribute *attr, char *buf)
1516 struct ipw_priv *p = d->driver_data;
1517 return sprintf(buf, "0x%08x\n", (int)p->status);
1520 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1522 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1523 char *buf)
1525 struct ipw_priv *p = d->driver_data;
1526 return sprintf(buf, "0x%08x\n", (int)p->config);
1529 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1531 static ssize_t show_nic_type(struct device *d,
1532 struct device_attribute *attr, char *buf)
1534 struct ipw_priv *priv = d->driver_data;
1535 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1538 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1540 static ssize_t show_ucode_version(struct device *d,
1541 struct device_attribute *attr, char *buf)
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1547 return 0;
1549 return sprintf(buf, "0x%08x\n", tmp);
1552 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1554 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1555 char *buf)
1557 u32 len = sizeof(u32), tmp = 0;
1558 struct ipw_priv *p = d->driver_data;
1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1561 return 0;
1563 return sprintf(buf, "0x%08x\n", tmp);
1566 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1569 * Add a device attribute to view/control the delay between eeprom
1570 * operations.
1572 static ssize_t show_eeprom_delay(struct device *d,
1573 struct device_attribute *attr, char *buf)
1575 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1576 return sprintf(buf, "%i\n", n);
1578 static ssize_t store_eeprom_delay(struct device *d,
1579 struct device_attribute *attr,
1580 const char *buf, size_t count)
1582 struct ipw_priv *p = d->driver_data;
1583 sscanf(buf, "%i", &p->eeprom_delay);
1584 return strnlen(buf, count);
1587 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1588 show_eeprom_delay, store_eeprom_delay);
1590 static ssize_t show_command_event_reg(struct device *d,
1591 struct device_attribute *attr, char *buf)
1593 u32 reg = 0;
1594 struct ipw_priv *p = d->driver_data;
1596 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1597 return sprintf(buf, "0x%08x\n", reg);
1599 static ssize_t store_command_event_reg(struct device *d,
1600 struct device_attribute *attr,
1601 const char *buf, size_t count)
1603 u32 reg;
1604 struct ipw_priv *p = d->driver_data;
1606 sscanf(buf, "%x", &reg);
1607 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1608 return strnlen(buf, count);
1611 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1612 show_command_event_reg, store_command_event_reg);
1614 static ssize_t show_mem_gpio_reg(struct device *d,
1615 struct device_attribute *attr, char *buf)
1617 u32 reg = 0;
1618 struct ipw_priv *p = d->driver_data;
1620 reg = ipw_read_reg32(p, 0x301100);
1621 return sprintf(buf, "0x%08x\n", reg);
1623 static ssize_t store_mem_gpio_reg(struct device *d,
1624 struct device_attribute *attr,
1625 const char *buf, size_t count)
1627 u32 reg;
1628 struct ipw_priv *p = d->driver_data;
1630 sscanf(buf, "%x", &reg);
1631 ipw_write_reg32(p, 0x301100, reg);
1632 return strnlen(buf, count);
1635 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1636 show_mem_gpio_reg, store_mem_gpio_reg);
1638 static ssize_t show_indirect_dword(struct device *d,
1639 struct device_attribute *attr, char *buf)
1641 u32 reg = 0;
1642 struct ipw_priv *priv = d->driver_data;
1644 if (priv->status & STATUS_INDIRECT_DWORD)
1645 reg = ipw_read_reg32(priv, priv->indirect_dword);
1646 else
1647 reg = 0;
1649 return sprintf(buf, "0x%08x\n", reg);
1651 static ssize_t store_indirect_dword(struct device *d,
1652 struct device_attribute *attr,
1653 const char *buf, size_t count)
1655 struct ipw_priv *priv = d->driver_data;
1657 sscanf(buf, "%x", &priv->indirect_dword);
1658 priv->status |= STATUS_INDIRECT_DWORD;
1659 return strnlen(buf, count);
1662 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1663 show_indirect_dword, store_indirect_dword);
1665 static ssize_t show_indirect_byte(struct device *d,
1666 struct device_attribute *attr, char *buf)
1668 u8 reg = 0;
1669 struct ipw_priv *priv = d->driver_data;
1671 if (priv->status & STATUS_INDIRECT_BYTE)
1672 reg = ipw_read_reg8(priv, priv->indirect_byte);
1673 else
1674 reg = 0;
1676 return sprintf(buf, "0x%02x\n", reg);
1678 static ssize_t store_indirect_byte(struct device *d,
1679 struct device_attribute *attr,
1680 const char *buf, size_t count)
1682 struct ipw_priv *priv = d->driver_data;
1684 sscanf(buf, "%x", &priv->indirect_byte);
1685 priv->status |= STATUS_INDIRECT_BYTE;
1686 return strnlen(buf, count);
1689 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1690 show_indirect_byte, store_indirect_byte);
1692 static ssize_t show_direct_dword(struct device *d,
1693 struct device_attribute *attr, char *buf)
1695 u32 reg = 0;
1696 struct ipw_priv *priv = d->driver_data;
1698 if (priv->status & STATUS_DIRECT_DWORD)
1699 reg = ipw_read32(priv, priv->direct_dword);
1700 else
1701 reg = 0;
1703 return sprintf(buf, "0x%08x\n", reg);
1705 static ssize_t store_direct_dword(struct device *d,
1706 struct device_attribute *attr,
1707 const char *buf, size_t count)
1709 struct ipw_priv *priv = d->driver_data;
1711 sscanf(buf, "%x", &priv->direct_dword);
1712 priv->status |= STATUS_DIRECT_DWORD;
1713 return strnlen(buf, count);
1716 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1717 show_direct_dword, store_direct_dword);
1719 static int rf_kill_active(struct ipw_priv *priv)
1721 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1722 priv->status |= STATUS_RF_KILL_HW;
1723 else
1724 priv->status &= ~STATUS_RF_KILL_HW;
1726 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1729 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1730 char *buf)
1732 /* 0 - RF kill not enabled
1733 1 - SW based RF kill active (sysfs)
1734 2 - HW based RF kill active
1735 3 - Both HW and SW based RF kill active */
1736 struct ipw_priv *priv = d->driver_data;
1737 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1738 (rf_kill_active(priv) ? 0x2 : 0x0);
1739 return sprintf(buf, "%i\n", val);
1742 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1744 if ((disable_radio ? 1 : 0) ==
1745 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1746 return 0;
1748 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1749 disable_radio ? "OFF" : "ON");
1751 if (disable_radio) {
1752 priv->status |= STATUS_RF_KILL_SW;
1754 if (priv->workqueue) {
1755 cancel_delayed_work(&priv->request_scan);
1756 cancel_delayed_work(&priv->request_direct_scan);
1757 cancel_delayed_work(&priv->request_passive_scan);
1758 cancel_delayed_work(&priv->scan_event);
1760 queue_work(priv->workqueue, &priv->down);
1761 } else {
1762 priv->status &= ~STATUS_RF_KILL_SW;
1763 if (rf_kill_active(priv)) {
1764 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1765 "disabled by HW switch\n");
1766 /* Make sure the RF_KILL check timer is running */
1767 cancel_delayed_work(&priv->rf_kill);
1768 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1769 round_jiffies_relative(2 * HZ));
1770 } else
1771 queue_work(priv->workqueue, &priv->up);
1774 return 1;
1777 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1778 const char *buf, size_t count)
1780 struct ipw_priv *priv = d->driver_data;
1782 ipw_radio_kill_sw(priv, buf[0] == '1');
1784 return count;
1787 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
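/* Reading rf_kill reports the 0..3 encoding described above; writing '1'
 * asserts the software kill (the radio is brought down via the 'down'
 * work item), and writing any other value clears it, after which
 * ipw_radio_kill_sw() either brings the radio back up or keeps polling
 * while the hardware switch is still active. */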
1789 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1790 char *buf)
1792 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 int pos = 0, len = 0;
1794 if (priv->config & CFG_SPEED_SCAN) {
1795 while (priv->speed_scan[pos] != 0)
1796 len += sprintf(&buf[len], "%d ",
1797 priv->speed_scan[pos++]);
1798 return len + sprintf(&buf[len], "\n");
1801 return sprintf(buf, "0\n");
1804 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1805 const char *buf, size_t count)
1807 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1808 int channel, pos = 0;
1809 const char *p = buf;
1811 /* list of space separated channels to scan, optionally ending with 0 */
1812 while ((channel = simple_strtol(p, NULL, 0))) {
1813 if (pos == MAX_SPEED_SCAN - 1) {
1814 priv->speed_scan[pos] = 0;
1815 break;
1818 if (ieee80211_is_valid_channel(priv->ieee, channel))
1819 priv->speed_scan[pos++] = channel;
1820 else
1821 IPW_WARNING("Skipping invalid channel request: %d\n",
1822 channel);
1823 p = strchr(p, ' ');
1824 if (!p)
1825 break;
1826 while (*p == ' ' || *p == '\t')
1827 p++;
1830 if (pos == 0)
1831 priv->config &= ~CFG_SPEED_SCAN;
1832 else {
1833 priv->speed_scan_pos = 0;
1834 priv->config |= CFG_SPEED_SCAN;
1837 return count;
1840 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1841 store_speed_scan);
1843 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1844 char *buf)
1846 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1847 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1850 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1851 const char *buf, size_t count)
1853 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1854 if (buf[0] == '1')
1855 priv->config |= CFG_NET_STATS;
1856 else
1857 priv->config &= ~CFG_NET_STATS;
1859 return count;
1862 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1863 show_net_stats, store_net_stats);
1865 static ssize_t show_channels(struct device *d,
1866 struct device_attribute *attr,
1867 char *buf)
1869 struct ipw_priv *priv = dev_get_drvdata(d);
1870 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1871 int len = 0, i;
1873 len = sprintf(&buf[len],
1874 "Displaying %d channels in 2.4Ghz band "
1875 "(802.11bg):\n", geo->bg_channels);
1877 for (i = 0; i < geo->bg_channels; i++) {
1878 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1879 geo->bg[i].channel,
1880 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1881 " (radar spectrum)" : "",
1882 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1883 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1884 ? "" : ", IBSS",
1885 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1886 "passive only" : "active/passive",
1887 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1888 "B" : "B/G");
1891 len += sprintf(&buf[len],
1892 "Displaying %d channels in 5.2Ghz band "
1893 "(802.11a):\n", geo->a_channels);
1894 for (i = 0; i < geo->a_channels; i++) {
1895 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1896 geo->a[i].channel,
1897 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1898 " (radar spectrum)" : "",
1899 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1900 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1901 ? "" : ", IBSS",
1902 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1903 "passive only" : "active/passive");
1906 return len;
1909 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1911 static void notify_wx_assoc_event(struct ipw_priv *priv)
1913 union iwreq_data wrqu;
1914 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1915 if (priv->status & STATUS_ASSOCIATED)
1916 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1917 else
1918 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1919 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1922 static void ipw_irq_tasklet(struct ipw_priv *priv)
1924 u32 inta, inta_mask, handled = 0;
1925 unsigned long flags;
1926 int rc = 0;
1928 spin_lock_irqsave(&priv->irq_lock, flags);
1930 inta = ipw_read32(priv, IPW_INTA_RW);
1931 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1932 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1934 /* Add any cached INTA values that need to be handled */
1935 inta |= priv->isr_inta;
1937 spin_unlock_irqrestore(&priv->irq_lock, flags);
1939 spin_lock_irqsave(&priv->lock, flags);
1941 /* handle all the justifications for the interrupt */
1942 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1943 ipw_rx(priv);
1944 handled |= IPW_INTA_BIT_RX_TRANSFER;
1947 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1948 IPW_DEBUG_HC("Command completed.\n");
1949 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1950 priv->status &= ~STATUS_HCMD_ACTIVE;
1951 wake_up_interruptible(&priv->wait_command_queue);
1952 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1955 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1956 IPW_DEBUG_TX("TX_QUEUE_1\n");
1957 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1958 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1961 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1962 IPW_DEBUG_TX("TX_QUEUE_2\n");
1963 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1964 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1967 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1968 IPW_DEBUG_TX("TX_QUEUE_3\n");
1969 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1970 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1973 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1974 IPW_DEBUG_TX("TX_QUEUE_4\n");
1975 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1976 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1979 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1980 IPW_WARNING("STATUS_CHANGE\n");
1981 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1984 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1985 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1986 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1989 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1990 IPW_WARNING("HOST_CMD_DONE\n");
1991 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1994 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1995 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1996 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1999 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2000 IPW_WARNING("PHY_OFF_DONE\n");
2001 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2004 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2005 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2006 priv->status |= STATUS_RF_KILL_HW;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2009 cancel_delayed_work(&priv->request_scan);
2010 cancel_delayed_work(&priv->request_direct_scan);
2011 cancel_delayed_work(&priv->request_passive_scan);
2012 cancel_delayed_work(&priv->scan_event);
2013 schedule_work(&priv->link_down);
2014 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2015 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2018 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2019 IPW_WARNING("Firmware error detected. Restarting.\n");
2020 if (priv->error) {
2021 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2022 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2023 struct ipw_fw_error *error =
2024 ipw_alloc_error_log(priv);
2025 ipw_dump_error_log(priv, error);
2026 kfree(error);
2028 } else {
2029 priv->error = ipw_alloc_error_log(priv);
2030 if (priv->error)
2031 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2032 else
2033 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2034 "log.\n");
2035 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2036 ipw_dump_error_log(priv, priv->error);
2039 /* XXX: If hardware encryption is for WPA/WPA2,
2040 * we have to notify the supplicant. */
2041 if (priv->ieee->sec.encrypt) {
2042 priv->status &= ~STATUS_ASSOCIATED;
2043 notify_wx_assoc_event(priv);
2046 /* Keep the restart process from trying to send host
2047 * commands by clearing the INIT status bit */
2048 priv->status &= ~STATUS_INIT;
2050 /* Cancel currently queued command. */
2051 priv->status &= ~STATUS_HCMD_ACTIVE;
2052 wake_up_interruptible(&priv->wait_command_queue);
2054 queue_work(priv->workqueue, &priv->adapter_restart);
2055 handled |= IPW_INTA_BIT_FATAL_ERROR;
2058 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2059 IPW_ERROR("Parity error\n");
2060 handled |= IPW_INTA_BIT_PARITY_ERROR;
2063 if (handled != inta) {
2064 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2067 spin_unlock_irqrestore(&priv->lock, flags);
2069 /* enable all interrupts */
2070 ipw_enable_interrupts(priv);
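/*
 * A note on locking in ipw_irq_tasklet() above: priv->irq_lock only guards
 * the brief read of the interrupt registers and the isr_inta value cached
 * for the tasklet, while priv->lock is held across the per-bit handling,
 * and interrupts are only re-enabled once that lock has been dropped.
 */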
2073 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2074 static char *get_cmd_string(u8 cmd)
2076 switch (cmd) {
2077 IPW_CMD(HOST_COMPLETE);
2078 IPW_CMD(POWER_DOWN);
2079 IPW_CMD(SYSTEM_CONFIG);
2080 IPW_CMD(MULTICAST_ADDRESS);
2081 IPW_CMD(SSID);
2082 IPW_CMD(ADAPTER_ADDRESS);
2083 IPW_CMD(PORT_TYPE);
2084 IPW_CMD(RTS_THRESHOLD);
2085 IPW_CMD(FRAG_THRESHOLD);
2086 IPW_CMD(POWER_MODE);
2087 IPW_CMD(WEP_KEY);
2088 IPW_CMD(TGI_TX_KEY);
2089 IPW_CMD(SCAN_REQUEST);
2090 IPW_CMD(SCAN_REQUEST_EXT);
2091 IPW_CMD(ASSOCIATE);
2092 IPW_CMD(SUPPORTED_RATES);
2093 IPW_CMD(SCAN_ABORT);
2094 IPW_CMD(TX_FLUSH);
2095 IPW_CMD(QOS_PARAMETERS);
2096 IPW_CMD(DINO_CONFIG);
2097 IPW_CMD(RSN_CAPABILITIES);
2098 IPW_CMD(RX_KEY);
2099 IPW_CMD(CARD_DISABLE);
2100 IPW_CMD(SEED_NUMBER);
2101 IPW_CMD(TX_POWER);
2102 IPW_CMD(COUNTRY_INFO);
2103 IPW_CMD(AIRONET_INFO);
2104 IPW_CMD(AP_TX_POWER);
2105 IPW_CMD(CCKM_INFO);
2106 IPW_CMD(CCX_VER_INFO);
2107 IPW_CMD(SET_CALIBRATION);
2108 IPW_CMD(SENSITIVITY_CALIB);
2109 IPW_CMD(RETRY_LIMIT);
2110 IPW_CMD(IPW_PRE_POWER_DOWN);
2111 IPW_CMD(VAP_BEACON_TEMPLATE);
2112 IPW_CMD(VAP_DTIM_PERIOD);
2113 IPW_CMD(EXT_SUPPORTED_RATES);
2114 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2115 IPW_CMD(VAP_QUIET_INTERVALS);
2116 IPW_CMD(VAP_CHANNEL_SWITCH);
2117 IPW_CMD(VAP_MANDATORY_CHANNELS);
2118 IPW_CMD(VAP_CELL_PWR_LIMIT);
2119 IPW_CMD(VAP_CF_PARAM_SET);
2120 IPW_CMD(VAP_SET_BEACONING_STATE);
2121 IPW_CMD(MEASUREMENT);
2122 IPW_CMD(POWER_CAPABILITY);
2123 IPW_CMD(SUPPORTED_CHANNELS);
2124 IPW_CMD(TPC_REPORT);
2125 IPW_CMD(WME_INFO);
2126 IPW_CMD(PRODUCTION_COMMAND);
2127 default:
2128 return "UNKNOWN";
2132 #define HOST_COMPLETE_TIMEOUT HZ
2134 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2136 int rc = 0;
2137 unsigned long flags;
2139 spin_lock_irqsave(&priv->lock, flags);
2140 if (priv->status & STATUS_HCMD_ACTIVE) {
2141 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2142 get_cmd_string(cmd->cmd));
2143 spin_unlock_irqrestore(&priv->lock, flags);
2144 return -EAGAIN;
2147 priv->status |= STATUS_HCMD_ACTIVE;
2149 if (priv->cmdlog) {
2150 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2151 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2152 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2153 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2154 cmd->len);
2155 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2158 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2159 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2160 priv->status);
2162 #ifndef DEBUG_CMD_WEP_KEY
2163 if (cmd->cmd == IPW_CMD_WEP_KEY)
2164 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2165 else
2166 #endif
2167 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2169 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2170 if (rc) {
2171 priv->status &= ~STATUS_HCMD_ACTIVE;
2172 IPW_ERROR("Failed to send %s: Reason %d\n",
2173 get_cmd_string(cmd->cmd), rc);
2174 spin_unlock_irqrestore(&priv->lock, flags);
2175 goto exit;
2177 spin_unlock_irqrestore(&priv->lock, flags);
2179 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2180 !(priv->
2181 status & STATUS_HCMD_ACTIVE),
2182 HOST_COMPLETE_TIMEOUT);
2183 if (rc == 0) {
2184 spin_lock_irqsave(&priv->lock, flags);
2185 if (priv->status & STATUS_HCMD_ACTIVE) {
2186 IPW_ERROR("Failed to send %s: Command timed out.\n",
2187 get_cmd_string(cmd->cmd));
2188 priv->status &= ~STATUS_HCMD_ACTIVE;
2189 spin_unlock_irqrestore(&priv->lock, flags);
2190 rc = -EIO;
2191 goto exit;
2193 spin_unlock_irqrestore(&priv->lock, flags);
2194 } else
2195 rc = 0;
2197 if (priv->status & STATUS_RF_KILL_HW) {
2198 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2199 get_cmd_string(cmd->cmd));
2200 rc = -EIO;
2201 goto exit;
2204 exit:
2205 if (priv->cmdlog) {
2206 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2207 priv->cmdlog_pos %= priv->cmdlog_len;
2209 return rc;
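/*
 * In outline, the host command path works like this: __ipw_send_cmd() marks
 * STATUS_HCMD_ACTIVE, queues the command on the command TX queue via
 * ipw_queue_tx_hcmd(), and then sleeps on wait_command_queue.  When the
 * firmware finishes, the IPW_INTA_BIT_TX_CMD_QUEUE handler in the tasklet
 * above clears STATUS_HCMD_ACTIVE and wakes the waiter, so at most one host
 * command is in flight at a time (a second sender gets -EAGAIN).
 */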
2212 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2214 struct host_cmd cmd = {
2215 .cmd = command,
2218 return __ipw_send_cmd(priv, &cmd);
2221 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2222 void *data)
2224 struct host_cmd cmd = {
2225 .cmd = command,
2226 .len = len,
2227 .param = data,
2230 return __ipw_send_cmd(priv, &cmd);
2233 static int ipw_send_host_complete(struct ipw_priv *priv)
2235 if (!priv) {
2236 IPW_ERROR("Invalid args\n");
2237 return -1;
2240 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2243 static int ipw_send_system_config(struct ipw_priv *priv)
2245 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2246 sizeof(priv->sys_config),
2247 &priv->sys_config);
2250 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2252 if (!priv || !ssid) {
2253 IPW_ERROR("Invalid args\n");
2254 return -1;
2257 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2258 ssid);
2261 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2263 if (!priv || !mac) {
2264 IPW_ERROR("Invalid args\n");
2265 return -1;
2268 IPW_DEBUG_INFO("%s: Setting MAC to %s\n",
2269 priv->net_dev->name, print_mac(mac, mac));
2271 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2275 * NOTE: This must be executed from our workqueue as it results in udelay
2276 * being called which may corrupt the keyboard if executed on default
2277 * workqueue
2279 static void ipw_adapter_restart(void *adapter)
2281 struct ipw_priv *priv = adapter;
2283 if (priv->status & STATUS_RF_KILL_MASK)
2284 return;
2286 ipw_down(priv);
2288 if (priv->assoc_network &&
2289 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2290 ipw_remove_current_network(priv);
2292 if (ipw_up(priv)) {
2293 IPW_ERROR("Failed to up device\n");
2294 return;
2298 static void ipw_bg_adapter_restart(struct work_struct *work)
2300 struct ipw_priv *priv =
2301 container_of(work, struct ipw_priv, adapter_restart);
2302 mutex_lock(&priv->mutex);
2303 ipw_adapter_restart(priv);
2304 mutex_unlock(&priv->mutex);
2307 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2309 static void ipw_scan_check(void *data)
2311 struct ipw_priv *priv = data;
2312 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2313 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2314 "adapter after (%dms).\n",
2315 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2316 queue_work(priv->workqueue, &priv->adapter_restart);
2320 static void ipw_bg_scan_check(struct work_struct *work)
2322 struct ipw_priv *priv =
2323 container_of(work, struct ipw_priv, scan_check.work);
2324 mutex_lock(&priv->mutex);
2325 ipw_scan_check(priv);
2326 mutex_unlock(&priv->mutex);
2329 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2330 struct ipw_scan_request_ext *request)
2332 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2333 sizeof(*request), request);
2336 static int ipw_send_scan_abort(struct ipw_priv *priv)
2338 if (!priv) {
2339 IPW_ERROR("Invalid args\n");
2340 return -1;
2343 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2346 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2348 struct ipw_sensitivity_calib calib = {
2349 .beacon_rssi_raw = cpu_to_le16(sens),
2352 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2353 &calib);
2356 static int ipw_send_associate(struct ipw_priv *priv,
2357 struct ipw_associate *associate)
2359 if (!priv || !associate) {
2360 IPW_ERROR("Invalid args\n");
2361 return -1;
2364 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2365 associate);
2368 static int ipw_send_supported_rates(struct ipw_priv *priv,
2369 struct ipw_supported_rates *rates)
2371 if (!priv || !rates) {
2372 IPW_ERROR("Invalid args\n");
2373 return -1;
2376 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2377 rates);
2380 static int ipw_set_random_seed(struct ipw_priv *priv)
2382 u32 val;
2384 if (!priv) {
2385 IPW_ERROR("Invalid args\n");
2386 return -1;
2389 get_random_bytes(&val, sizeof(val));
2391 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2394 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2396 __le32 v = cpu_to_le32(phy_off);
2397 if (!priv) {
2398 IPW_ERROR("Invalid args\n");
2399 return -1;
2402 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2405 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2407 if (!priv || !power) {
2408 IPW_ERROR("Invalid args\n");
2409 return -1;
2412 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2415 static int ipw_set_tx_power(struct ipw_priv *priv)
2417 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2418 struct ipw_tx_power tx_power;
2419 s8 max_power;
2420 int i;
2422 memset(&tx_power, 0, sizeof(tx_power));
2424 /* configure device for 'G' band */
2425 tx_power.ieee_mode = IPW_G_MODE;
2426 tx_power.num_channels = geo->bg_channels;
2427 for (i = 0; i < geo->bg_channels; i++) {
2428 max_power = geo->bg[i].max_power;
2429 tx_power.channels_tx_power[i].channel_number =
2430 geo->bg[i].channel;
2431 tx_power.channels_tx_power[i].tx_power = max_power ?
2432 min(max_power, priv->tx_power) : priv->tx_power;
2434 if (ipw_send_tx_power(priv, &tx_power))
2435 return -EIO;
2437 /* configure device to also handle 'B' band */
2438 tx_power.ieee_mode = IPW_B_MODE;
2439 if (ipw_send_tx_power(priv, &tx_power))
2440 return -EIO;
2442 /* configure device to also handle 'A' band */
2443 if (priv->ieee->abg_true) {
2444 tx_power.ieee_mode = IPW_A_MODE;
2445 tx_power.num_channels = geo->a_channels;
2446 for (i = 0; i < tx_power.num_channels; i++) {
2447 max_power = geo->a[i].max_power;
2448 tx_power.channels_tx_power[i].channel_number =
2449 geo->a[i].channel;
2450 tx_power.channels_tx_power[i].tx_power = max_power ?
2451 min(max_power, priv->tx_power) : priv->tx_power;
2453 if (ipw_send_tx_power(priv, &tx_power))
2454 return -EIO;
2456 return 0;
2459 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2461 struct ipw_rts_threshold rts_threshold = {
2462 .rts_threshold = cpu_to_le16(rts),
2465 if (!priv) {
2466 IPW_ERROR("Invalid args\n");
2467 return -1;
2470 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2471 sizeof(rts_threshold), &rts_threshold);
2474 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2476 struct ipw_frag_threshold frag_threshold = {
2477 .frag_threshold = cpu_to_le16(frag),
2480 if (!priv) {
2481 IPW_ERROR("Invalid args\n");
2482 return -1;
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2486 sizeof(frag_threshold), &frag_threshold);
2489 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2491 __le32 param;
2493 if (!priv) {
2494 IPW_ERROR("Invalid args\n");
2495 return -1;
2498 /* If on battery, set to 3, if AC set to CAM, else user
2499 * level */
2500 switch (mode) {
2501 case IPW_POWER_BATTERY:
2502 param = cpu_to_le32(IPW_POWER_INDEX_3);
2503 break;
2504 case IPW_POWER_AC:
2505 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2506 break;
2507 default:
2508 param = cpu_to_le32(mode);
2509 break;
2512 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2513 &param);
2516 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2518 struct ipw_retry_limit retry_limit = {
2519 .short_retry_limit = slimit,
2520 .long_retry_limit = llimit
2523 if (!priv) {
2524 IPW_ERROR("Invalid args\n");
2525 return -1;
2528 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2529 &retry_limit);
2533 * The IPW device contains a Microwire compatible EEPROM that stores
2534 * various data like the MAC address. Usually the firmware has exclusive
2535 * access to the eeprom, but during device initialization (before the
2536 * device driver has sent the HostComplete command to the firmware) the
2537 * device driver has read access to the EEPROM by way of indirect addressing
2538 * through a couple of memory mapped registers.
2540 * The following is a simplified implementation for pulling data out of
2541 * the eeprom, along with some helper functions to find information in
2542 * the per device private data's copy of the eeprom.
2544 * NOTE: To better understand how these functions work (i.e. what is a chip
2545 * select and why do we have to keep driving the eeprom clock?), read
2546 * just about any data sheet for a Microwire compatible EEPROM.
2549 /* write a 32 bit value into the indirect accessor register */
2550 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2552 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2554 /* the eeprom requires some time to complete the operation */
2555 udelay(p->eeprom_delay);
2557 return;
2560 /* perform a chip select operation */
2561 static void eeprom_cs(struct ipw_priv *priv)
2563 eeprom_write_reg(priv, 0);
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2566 eeprom_write_reg(priv, EEPROM_BIT_CS);
2569 /* perform a chip select operation */
2570 static void eeprom_disable_cs(struct ipw_priv *priv)
2572 eeprom_write_reg(priv, EEPROM_BIT_CS);
2573 eeprom_write_reg(priv, 0);
2574 eeprom_write_reg(priv, EEPROM_BIT_SK);
2577 /* push a single bit down to the eeprom */
2578 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2580 int d = (bit ? EEPROM_BIT_DI : 0);
2581 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2582 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2585 /* push an opcode followed by an address down to the eeprom */
2586 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2588 int i;
2590 eeprom_cs(priv);
2591 eeprom_write_bit(priv, 1);
2592 eeprom_write_bit(priv, op & 2);
2593 eeprom_write_bit(priv, op & 1);
2594 for (i = 7; i >= 0; i--) {
2595 eeprom_write_bit(priv, addr & (1 << i));
2599 /* pull 16 bits off the eeprom, one bit at a time */
2600 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2602 int i;
2603 u16 r = 0;
2605 /* Send READ Opcode */
2606 eeprom_op(priv, EEPROM_CMD_READ, addr);
2608 /* Send dummy bit */
2609 eeprom_write_reg(priv, EEPROM_BIT_CS);
2611 /* Read the byte off the eeprom one bit at a time */
2612 for (i = 0; i < 16; i++) {
2613 u32 data = 0;
2614 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2615 eeprom_write_reg(priv, EEPROM_BIT_CS);
2616 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2617 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2620 /* Send another dummy bit */
2621 eeprom_write_reg(priv, 0);
2622 eeprom_disable_cs(priv);
2624 return r;
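/*
 * Taken together, the helpers above implement one Microwire READ cycle:
 * eeprom_cs() raises the chip select, eeprom_op() clocks in a start bit,
 * the two opcode bits and eight address bits (MSB first), and
 * eeprom_read_u16() then clocks the 16 data bits back out of EEPROM_BIT_DO,
 * MSB first, before eeprom_disable_cs() drops the chip select again.
 */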
2627 /* helper function for pulling the mac address out of the private */
2628 /* data's copy of the eeprom data */
2629 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2631 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2635 * Either the device driver (i.e. the host) or the firmware can
2636 * load eeprom data into the designated region in SRAM. If neither
2637 * happens then the FW will shut down with a fatal error.
2639 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2640 * bit in the designated region of shared SRAM needs to be non-zero.
2642 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2644 int i;
2645 __le16 *eeprom = (__le16 *) priv->eeprom;
2647 IPW_DEBUG_TRACE(">>\n");
2649 /* read entire contents of eeprom into private buffer */
2650 for (i = 0; i < 128; i++)
2651 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2654 If the data looks correct, then copy it to our private
2655 copy. Otherwise let the firmware know to perform the operation
2656 on its own.
2658 if (priv->eeprom[EEPROM_VERSION] != 0) {
2659 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2661 /* write the eeprom data to sram */
2662 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2663 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2665 /* Do not load eeprom data on fatal error or suspend */
2666 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2667 } else {
2668 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2670 /* Load eeprom data on fatal error or suspend */
2671 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2674 IPW_DEBUG_TRACE("<<\n");
2677 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2679 count >>= 2;
2680 if (!count)
2681 return;
2682 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2683 while (count--)
2684 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2687 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2689 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2690 CB_NUMBER_OF_ELEMENTS_SMALL *
2691 sizeof(struct command_block));
2694 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2695 { /* start dma engine but no transfers yet */
2697 IPW_DEBUG_FW(">> : \n");
2699 /* Start the dma */
2700 ipw_fw_dma_reset_command_blocks(priv);
2702 /* Write CB base address */
2703 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2705 IPW_DEBUG_FW("<< : \n");
2706 return 0;
2709 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2711 u32 control = 0;
2713 IPW_DEBUG_FW(">> :\n");
2715 /* set the Stop and Abort bit */
2716 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2717 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2718 priv->sram_desc.last_cb_index = 0;
2720 IPW_DEBUG_FW("<< \n");
2723 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2724 struct command_block *cb)
2726 u32 address =
2727 IPW_SHARED_SRAM_DMA_CONTROL +
2728 (sizeof(struct command_block) * index);
2729 IPW_DEBUG_FW(">> :\n");
2731 ipw_write_indirect(priv, address, (u8 *) cb,
2732 (int)sizeof(struct command_block));
2734 IPW_DEBUG_FW("<< :\n");
2735 return 0;
2739 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2741 u32 control = 0;
2742 u32 index = 0;
2744 IPW_DEBUG_FW(">> :\n");
2746 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2747 ipw_fw_dma_write_command_block(priv, index,
2748 &priv->sram_desc.cb_list[index]);
2750 /* Enable the DMA in the CSR register */
2751 ipw_clear_bit(priv, IPW_RESET_REG,
2752 IPW_RESET_REG_MASTER_DISABLED |
2753 IPW_RESET_REG_STOP_MASTER);
2755 /* Set the Start bit. */
2756 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2757 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2759 IPW_DEBUG_FW("<< :\n");
2760 return 0;
2763 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2765 u32 address;
2766 u32 register_value = 0;
2767 u32 cb_fields_address = 0;
2769 IPW_DEBUG_FW(">> :\n");
2770 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2771 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2773 /* Read the DMA Control register */
2774 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2775 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2777 /* Print the CB values */
2778 cb_fields_address = address;
2779 register_value = ipw_read_reg32(priv, cb_fields_address);
2780 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2782 cb_fields_address += sizeof(u32);
2783 register_value = ipw_read_reg32(priv, cb_fields_address);
2784 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2786 cb_fields_address += sizeof(u32);
2787 register_value = ipw_read_reg32(priv, cb_fields_address);
2788 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2789 register_value);
2791 cb_fields_address += sizeof(u32);
2792 register_value = ipw_read_reg32(priv, cb_fields_address);
2793 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2795 IPW_DEBUG_FW(">> :\n");
2798 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2800 u32 current_cb_address = 0;
2801 u32 current_cb_index = 0;
2803 IPW_DEBUG_FW("<< :\n");
2804 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2806 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2807 sizeof(struct command_block);
2809 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2810 current_cb_index, current_cb_address);
2812 IPW_DEBUG_FW(">> :\n");
2813 return current_cb_index;
2817 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2818 u32 src_address,
2819 u32 dest_address,
2820 u32 length,
2821 int interrupt_enabled, int is_last)
2824 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2825 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2826 CB_DEST_SIZE_LONG;
2827 struct command_block *cb;
2828 u32 last_cb_element = 0;
2830 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2831 src_address, dest_address, length);
2833 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2834 return -1;
2836 last_cb_element = priv->sram_desc.last_cb_index;
2837 cb = &priv->sram_desc.cb_list[last_cb_element];
2838 priv->sram_desc.last_cb_index++;
2840 /* Calculate the new CB control word */
2841 if (interrupt_enabled)
2842 control |= CB_INT_ENABLED;
2844 if (is_last)
2845 control |= CB_LAST_VALID;
2847 control |= length;
2849 /* Calculate the CB Element's checksum value */
2850 cb->status = control ^ src_address ^ dest_address;
2852 /* Copy the Source and Destination addresses */
2853 cb->dest_addr = dest_address;
2854 cb->source_addr = src_address;
2856 /* Copy the Control Word last */
2857 cb->control = control;
2859 return 0;
2862 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2863 u32 src_phys, u32 dest_address, u32 length)
2865 u32 bytes_left = length;
2866 u32 src_offset = 0;
2867 u32 dest_offset = 0;
2868 int status = 0;
2869 IPW_DEBUG_FW(">> \n");
2870 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2871 src_phys, dest_address, length);
2872 while (bytes_left > CB_MAX_LENGTH) {
2873 status = ipw_fw_dma_add_command_block(priv,
2874 src_phys + src_offset,
2875 dest_address +
2876 dest_offset,
2877 CB_MAX_LENGTH, 0, 0);
2878 if (status) {
2879 IPW_DEBUG_FW_INFO(": Failed\n");
2880 return -1;
2881 } else
2882 IPW_DEBUG_FW_INFO(": Added new cb\n");
2884 src_offset += CB_MAX_LENGTH;
2885 dest_offset += CB_MAX_LENGTH;
2886 bytes_left -= CB_MAX_LENGTH;
2889 /* add the buffer tail */
2890 if (bytes_left > 0) {
2891 status =
2892 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2893 dest_address + dest_offset,
2894 bytes_left, 0, 0);
2895 if (status) {
2896 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2897 return -1;
2898 } else
2899 IPW_DEBUG_FW_INFO
2900 (": Adding new cb - the buffer tail\n");
2903 IPW_DEBUG_FW("<< \n");
2904 return 0;
2907 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2909 u32 current_index = 0, previous_index;
2910 u32 watchdog = 0;
2912 IPW_DEBUG_FW(">> : \n");
2914 current_index = ipw_fw_dma_command_block_index(priv);
2915 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2916 (int)priv->sram_desc.last_cb_index);
2918 while (current_index < priv->sram_desc.last_cb_index) {
2919 udelay(50);
2920 previous_index = current_index;
2921 current_index = ipw_fw_dma_command_block_index(priv);
2923 if (previous_index < current_index) {
2924 watchdog = 0;
2925 continue;
2927 if (++watchdog > 400) {
2928 IPW_DEBUG_FW_INFO("Timeout\n");
2929 ipw_fw_dma_dump_command_block(priv);
2930 ipw_fw_dma_abort(priv);
2931 return -1;
2935 ipw_fw_dma_abort(priv);
2937 /*Disable the DMA in the CSR register */
2938 ipw_set_bit(priv, IPW_RESET_REG,
2939 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2941 IPW_DEBUG_FW("<< dmaWaitSync \n");
2942 return 0;
2945 static void ipw_remove_current_network(struct ipw_priv *priv)
2947 struct list_head *element, *safe;
2948 struct ieee80211_network *network = NULL;
2949 unsigned long flags;
2951 spin_lock_irqsave(&priv->ieee->lock, flags);
2952 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2953 network = list_entry(element, struct ieee80211_network, list);
2954 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2955 list_del(element);
2956 list_add_tail(&network->list,
2957 &priv->ieee->network_free_list);
2960 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2964 * Check that card is still alive.
2965 * Reads debug register from domain0.
2966 * If card is present, pre-defined value should
2967 * be found there.
2969 * @param priv
2970 * @return 1 if card is present, 0 otherwise
2972 static inline int ipw_alive(struct ipw_priv *priv)
2974 return ipw_read32(priv, 0x90) == 0xd55555d5;
2977 /* timeout in msec, attempted in 10-msec quanta */
2978 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2979 int timeout)
2981 int i = 0;
2983 do {
2984 if ((ipw_read32(priv, addr) & mask) == mask)
2985 return i;
2986 mdelay(10);
2987 i += 10;
2988 } while (i < timeout);
2990 return -ETIME;
2993 /* These functions load the firmware and micro code for the operation of
2994 * the ipw hardware. It assumes the buffer has all the bits for the
2995 * image and the caller is handling the memory allocation and clean up.
2998 static int ipw_stop_master(struct ipw_priv *priv)
3000 int rc;
3002 IPW_DEBUG_TRACE(">> \n");
3003 /* stop master. typical delay - 0 */
3004 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3006 /* timeout is in msec, polled in 10-msec quanta */
3007 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3008 IPW_RESET_REG_MASTER_DISABLED, 100);
3009 if (rc < 0) {
3010 IPW_ERROR("wait for stop master failed after 100ms\n");
3011 return -1;
3014 IPW_DEBUG_INFO("stop master %dms\n", rc);
3016 return rc;
3019 static void ipw_arc_release(struct ipw_priv *priv)
3021 IPW_DEBUG_TRACE(">> \n");
3022 mdelay(5);
3024 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3026 /* no one knows timing, for safety add some delay */
3027 mdelay(5);
3030 struct fw_chunk {
3031 __le32 address;
3032 __le32 length;
3035 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3037 int rc = 0, i, addr;
3038 u8 cr = 0;
3039 __le16 *image;
3041 image = (__le16 *) data;
3043 IPW_DEBUG_TRACE(">> \n");
3045 rc = ipw_stop_master(priv);
3047 if (rc < 0)
3048 return rc;
3050 for (addr = IPW_SHARED_LOWER_BOUND;
3051 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3052 ipw_write32(priv, addr, 0);
3055 /* no ucode (yet) */
3056 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3057 /* destroy DMA queues */
3058 /* reset sequence */
3060 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3061 ipw_arc_release(priv);
3062 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3063 mdelay(1);
3065 /* reset PHY */
3066 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3067 mdelay(1);
3069 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3070 mdelay(1);
3072 /* enable ucode store */
3073 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3074 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3075 mdelay(1);
3077 /* write ucode */
3079 * @bug
3080 * Do NOT set indirect address register once and then
3081 * store data to indirect data register in the loop.
3082 * It seems very reasonable, but in this case DINO does not
3083 * accept the ucode. It is essential to set the address each time.
3085 /* load new ipw uCode */
3086 for (i = 0; i < len / 2; i++)
3087 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3088 le16_to_cpu(image[i]));
3090 /* enable DINO */
3091 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3092 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3094 /* this is where the igx / win driver deviates from the VAP driver. */
3096 /* wait for alive response */
3097 for (i = 0; i < 100; i++) {
3098 /* poll for incoming data */
3099 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3100 if (cr & DINO_RXFIFO_DATA)
3101 break;
3102 mdelay(1);
3105 if (cr & DINO_RXFIFO_DATA) {
3106 /* the alive command response size is NOT a multiple of 4 */
3107 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3109 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3110 response_buffer[i] =
3111 cpu_to_le32(ipw_read_reg32(priv,
3112 IPW_BASEBAND_RX_FIFO_READ));
3113 memcpy(&priv->dino_alive, response_buffer,
3114 sizeof(priv->dino_alive));
3115 if (priv->dino_alive.alive_command == 1
3116 && priv->dino_alive.ucode_valid == 1) {
3117 rc = 0;
3118 IPW_DEBUG_INFO
3119 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3120 "of %02d/%02d/%02d %02d:%02d\n",
3121 priv->dino_alive.software_revision,
3122 priv->dino_alive.software_revision,
3123 priv->dino_alive.device_identifier,
3124 priv->dino_alive.device_identifier,
3125 priv->dino_alive.time_stamp[0],
3126 priv->dino_alive.time_stamp[1],
3127 priv->dino_alive.time_stamp[2],
3128 priv->dino_alive.time_stamp[3],
3129 priv->dino_alive.time_stamp[4]);
3130 } else {
3131 IPW_DEBUG_INFO("Microcode is not alive\n");
3132 rc = -EINVAL;
3134 } else {
3135 IPW_DEBUG_INFO("No alive response from DINO\n");
3136 rc = -ETIME;
3139 /* disable DINO, otherwise for some reason the
3140 firmware has problems getting the alive response. */
3141 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3143 return rc;
3146 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3148 int rc = -1;
3149 int offset = 0;
3150 struct fw_chunk *chunk;
3151 dma_addr_t shared_phys;
3152 u8 *shared_virt;
3154 IPW_DEBUG_TRACE("<< : \n");
3155 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3157 if (!shared_virt)
3158 return -ENOMEM;
3160 memmove(shared_virt, data, len);
3162 /* Start the Dma */
3163 rc = ipw_fw_dma_enable(priv);
3165 if (priv->sram_desc.last_cb_index > 0) {
3166 /* the DMA is already set up; this would be a bug. */
3167 BUG();
3168 goto out;
3171 do {
3172 chunk = (struct fw_chunk *)(data + offset);
3173 offset += sizeof(struct fw_chunk);
3174 /* build DMA packet and queue up for sending */
3175 /* dma to chunk->address, the chunk->length bytes from data +
3176 * offset */
3177 /* Dma loading */
3178 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3179 le32_to_cpu(chunk->address),
3180 le32_to_cpu(chunk->length));
3181 if (rc) {
3182 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3183 goto out;
3186 offset += le32_to_cpu(chunk->length);
3187 } while (offset < len);
3189 /* Run the DMA and wait for the answer */
3190 rc = ipw_fw_dma_kick(priv);
3191 if (rc) {
3192 IPW_ERROR("dmaKick Failed\n");
3193 goto out;
3196 rc = ipw_fw_dma_wait(priv);
3197 if (rc) {
3198 IPW_ERROR("dmaWaitSync Failed\n");
3199 goto out;
3201 out:
3202 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3203 return rc;
3206 /* stop nic */
3207 static int ipw_stop_nic(struct ipw_priv *priv)
3209 int rc = 0;
3211 /* stop */
3212 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3214 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3215 IPW_RESET_REG_MASTER_DISABLED, 500);
3216 if (rc < 0) {
3217 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3218 return rc;
3221 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3223 return rc;
3226 static void ipw_start_nic(struct ipw_priv *priv)
3228 IPW_DEBUG_TRACE(">>\n");
3230 /* prvHwStartNic release ARC */
3231 ipw_clear_bit(priv, IPW_RESET_REG,
3232 IPW_RESET_REG_MASTER_DISABLED |
3233 IPW_RESET_REG_STOP_MASTER |
3234 CBD_RESET_REG_PRINCETON_RESET);
3236 /* enable power management */
3237 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3238 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3240 IPW_DEBUG_TRACE("<<\n");
3243 static int ipw_init_nic(struct ipw_priv *priv)
3245 int rc;
3247 IPW_DEBUG_TRACE(">>\n");
3248 /* reset */
3249 /*prvHwInitNic */
3250 /* set "initialization complete" bit to move adapter to D0 state */
3251 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3253 /* low-level PLL activation */
3254 ipw_write32(priv, IPW_READ_INT_REGISTER,
3255 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3257 /* wait for clock stabilization */
3258 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3259 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3260 if (rc < 0)
3261 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3263 /* assert SW reset */
3264 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3266 udelay(10);
3268 /* set "initialization complete" bit to move adapter to D0 state */
3269 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3271 IPW_DEBUG_TRACE(">>\n");
3272 return 0;
3275 /* Call this function from process context, it will sleep in request_firmware.
3276 * Probe is an ok place to call this from.
3278 static int ipw_reset_nic(struct ipw_priv *priv)
3280 int rc = 0;
3281 unsigned long flags;
3283 IPW_DEBUG_TRACE(">>\n");
3285 rc = ipw_init_nic(priv);
3287 spin_lock_irqsave(&priv->lock, flags);
3288 /* Clear the 'host command active' bit... */
3289 priv->status &= ~STATUS_HCMD_ACTIVE;
3290 wake_up_interruptible(&priv->wait_command_queue);
3291 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3292 wake_up_interruptible(&priv->wait_state);
3293 spin_unlock_irqrestore(&priv->lock, flags);
3295 IPW_DEBUG_TRACE("<<\n");
3296 return rc;
3300 struct ipw_fw {
3301 __le32 ver;
3302 __le32 boot_size;
3303 __le32 ucode_size;
3304 __le32 fw_size;
3305 u8 data[0];
3308 static int ipw_get_fw(struct ipw_priv *priv,
3309 const struct firmware **raw, const char *name)
3311 struct ipw_fw *fw;
3312 int rc;
3314 /* ask firmware_class module to get the boot firmware off disk */
3315 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3316 if (rc < 0) {
3317 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3318 return rc;
3321 if ((*raw)->size < sizeof(*fw)) {
3322 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3323 return -EINVAL;
3326 fw = (void *)(*raw)->data;
3328 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3329 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3330 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3331 name, (*raw)->size);
3332 return -EINVAL;
3335 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3336 name,
3337 le32_to_cpu(fw->ver) >> 16,
3338 le32_to_cpu(fw->ver) & 0xff,
3339 (*raw)->size - sizeof(*fw));
3340 return 0;
3343 #define IPW_RX_BUF_SIZE (3000)
3345 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3346 struct ipw_rx_queue *rxq)
3348 unsigned long flags;
3349 int i;
3351 spin_lock_irqsave(&rxq->lock, flags);
3353 INIT_LIST_HEAD(&rxq->rx_free);
3354 INIT_LIST_HEAD(&rxq->rx_used);
3356 /* Fill the rx_used queue with _all_ of the Rx buffers */
3357 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3358 /* In the reset function, these buffers may have been allocated
3359 * to an SKB, so we need to unmap and free potential storage */
3360 if (rxq->pool[i].skb != NULL) {
3361 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3362 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3363 dev_kfree_skb(rxq->pool[i].skb);
3364 rxq->pool[i].skb = NULL;
3366 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3369 /* Set us so that we have processed and used all buffers, but have
3370 * not restocked the Rx queue with fresh buffers */
3371 rxq->read = rxq->write = 0;
3372 rxq->free_count = 0;
3373 spin_unlock_irqrestore(&rxq->lock, flags);
3376 #ifdef CONFIG_PM
3377 static int fw_loaded = 0;
3378 static const struct firmware *raw = NULL;
3380 static void free_firmware(void)
3382 if (fw_loaded) {
3383 release_firmware(raw);
3384 raw = NULL;
3385 fw_loaded = 0;
3388 #else
3389 #define free_firmware() do {} while (0)
3390 #endif
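/*
 * Roughly, ipw_load() below brings the adapter up from cold: it fetches the
 * combined image with ipw_get_fw(), resets the NIC, DMAs the boot image in
 * and waits for IPW_INTA_BIT_FW_INITIALIZATION_DONE, pushes the microcode
 * through DINO with ipw_load_ucode(), DMAs the runtime firmware, and then
 * resets the TX queues, initializes the EEPROM region of SRAM and re-enables
 * interrupts before replenishing the RX queue.
 */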
3392 static int ipw_load(struct ipw_priv *priv)
3394 #ifndef CONFIG_PM
3395 const struct firmware *raw = NULL;
3396 #endif
3397 struct ipw_fw *fw;
3398 u8 *boot_img, *ucode_img, *fw_img;
3399 u8 *name = NULL;
3400 int rc = 0, retries = 3;
3402 switch (priv->ieee->iw_mode) {
3403 case IW_MODE_ADHOC:
3404 name = "ipw2200-ibss.fw";
3405 break;
3406 #ifdef CONFIG_IPW2200_MONITOR
3407 case IW_MODE_MONITOR:
3408 name = "ipw2200-sniffer.fw";
3409 break;
3410 #endif
3411 case IW_MODE_INFRA:
3412 name = "ipw2200-bss.fw";
3413 break;
3416 if (!name) {
3417 rc = -EINVAL;
3418 goto error;
3421 #ifdef CONFIG_PM
3422 if (!fw_loaded) {
3423 #endif
3424 rc = ipw_get_fw(priv, &raw, name);
3425 if (rc < 0)
3426 goto error;
3427 #ifdef CONFIG_PM
3429 #endif
3431 fw = (void *)raw->data;
3432 boot_img = &fw->data[0];
3433 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3434 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3435 le32_to_cpu(fw->ucode_size)];
3437 if (rc < 0)
3438 goto error;
3440 if (!priv->rxq)
3441 priv->rxq = ipw_rx_queue_alloc(priv);
3442 else
3443 ipw_rx_queue_reset(priv, priv->rxq);
3444 if (!priv->rxq) {
3445 IPW_ERROR("Unable to initialize Rx queue\n");
3446 goto error;
3449 retry:
3450 /* Ensure interrupts are disabled */
3451 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3452 priv->status &= ~STATUS_INT_ENABLED;
3454 /* ack pending interrupts */
3455 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3457 ipw_stop_nic(priv);
3459 rc = ipw_reset_nic(priv);
3460 if (rc < 0) {
3461 IPW_ERROR("Unable to reset NIC\n");
3462 goto error;
3465 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3466 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3468 /* DMA the initial boot firmware into the device */
3469 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3470 if (rc < 0) {
3471 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3472 goto error;
3475 /* kick start the device */
3476 ipw_start_nic(priv);
3478 /* wait for the device to finish its initial startup sequence */
3479 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3480 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3481 if (rc < 0) {
3482 IPW_ERROR("device failed to boot initial fw image\n");
3483 goto error;
3485 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3487 /* ack fw init done interrupt */
3488 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3490 /* DMA the ucode into the device */
3491 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3492 if (rc < 0) {
3493 IPW_ERROR("Unable to load ucode: %d\n", rc);
3494 goto error;
3497 /* stop nic */
3498 ipw_stop_nic(priv);
3500 /* DMA bss firmware into the device */
3501 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3502 if (rc < 0) {
3503 IPW_ERROR("Unable to load firmware: %d\n", rc);
3504 goto error;
3506 #ifdef CONFIG_PM
3507 fw_loaded = 1;
3508 #endif
3510 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3512 rc = ipw_queue_reset(priv);
3513 if (rc < 0) {
3514 IPW_ERROR("Unable to initialize queues\n");
3515 goto error;
3518 /* Ensure interrupts are disabled */
3519 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3520 /* ack pending interrupts */
3521 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3523 /* kick start the device */
3524 ipw_start_nic(priv);
3526 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3527 if (retries > 0) {
3528 IPW_WARNING("Parity error. Retrying init.\n");
3529 retries--;
3530 goto retry;
3533 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3534 rc = -EIO;
3535 goto error;
3538 /* wait for the device */
3539 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3540 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3541 if (rc < 0) {
3542 IPW_ERROR("device failed to start within 500ms\n");
3543 goto error;
3545 IPW_DEBUG_INFO("device response after %dms\n", rc);
3547 /* ack fw init done interrupt */
3548 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3550 /* read eeprom data and initialize the eeprom region of sram */
3551 priv->eeprom_delay = 1;
3552 ipw_eeprom_init_sram(priv);
3554 /* enable interrupts */
3555 ipw_enable_interrupts(priv);
3557 /* Ensure our queue has valid packets */
3558 ipw_rx_queue_replenish(priv);
3560 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3562 /* ack pending interrupts */
3563 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3565 #ifndef CONFIG_PM
3566 release_firmware(raw);
3567 #endif
3568 return 0;
3570 error:
3571 if (priv->rxq) {
3572 ipw_rx_queue_free(priv, priv->rxq);
3573 priv->rxq = NULL;
3575 ipw_tx_queue_free(priv);
3576 if (raw)
3577 release_firmware(raw);
3578 #ifdef CONFIG_PM
3579 fw_loaded = 0;
3580 raw = NULL;
3581 #endif
3583 return rc;
3587 * DMA services
3589 * Theory of operation
3591 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3592 * Two empty entries are always kept in the buffer to protect from overflow.
3594 * For the Tx queue there are low-mark and high-mark limits. If, after queuing
3595 * a packet for Tx, the free space drops below the low mark, the Tx queue is
3596 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
3597 * space rises above the high mark, the Tx queue is resumed.
3599 * The IPW operates with six queues, one receive queue in the device's
3600 * sram, one transmit queue for sending commands to the device firmware,
3601 * and four transmit queues for data.
3603 * The four transmit queues allow for performing quality of service (qos)
3604 * transmissions as per the 802.11 protocol. Currently Linux does not
3605 * provide a mechanism to the user for utilizing prioritized queues, so
3606 * we only utilize the first data transmit queue (queue1).
3610 * Driver allocates buffers of this size for Rx
3614 * ipw_rx_queue_space - Return number of free slots available in queue.
3616 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3618 int s = q->read - q->write;
3619 if (s <= 0)
3620 s += RX_QUEUE_SIZE;
3621 /* keep some buffer to not confuse full and empty queue */
3622 s -= 2;
3623 if (s < 0)
3624 s = 0;
3625 return s;
3628 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3630 int s = q->last_used - q->first_empty;
3631 if (s <= 0)
3632 s += q->n_bd;
3633 s -= 2; /* keep some reserve to not confuse empty and full situations */
3634 if (s < 0)
3635 s = 0;
3636 return s;
3639 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3641 return (++index == n_bd) ? 0 : index;
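/*
 * A small worked example of the two-slot reserve used above: with n_bd = 64
 * and the read and write indices equal, the queue is empty and the reported
 * space is (0 + 64) - 2 = 62, so the producer is stopped two entries short
 * of wrapping the write index back onto the read index and the full and
 * empty cases never look the same.
 */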
3645 * Initialize common DMA queue structure
3647 * @param q queue to init
3648 * @param count Number of BD's to allocate. Should be power of 2
3649 * @param read_register Address for 'read' register
3650 * (not offset within BAR, full address)
3651 * @param write_register Address for 'write' register
3652 * (not offset within BAR, full address)
3653 * @param base_register Address for 'base' register
3654 * (not offset within BAR, full address)
3655 * @param size Address for 'size' register
3656 * (not offset within BAR, full address)
3658 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3659 int count, u32 read, u32 write, u32 base, u32 size)
3661 q->n_bd = count;
3663 q->low_mark = q->n_bd / 4;
3664 if (q->low_mark < 4)
3665 q->low_mark = 4;
3667 q->high_mark = q->n_bd / 8;
3668 if (q->high_mark < 2)
3669 q->high_mark = 2;
3671 q->first_empty = q->last_used = 0;
3672 q->reg_r = read;
3673 q->reg_w = write;
3675 ipw_write32(priv, base, q->dma_addr);
3676 ipw_write32(priv, size, count);
3677 ipw_write32(priv, read, 0);
3678 ipw_write32(priv, write, 0);
3680 _ipw_read32(priv, 0x90);
3683 static int ipw_queue_tx_init(struct ipw_priv *priv,
3684 struct clx2_tx_queue *q,
3685 int count, u32 read, u32 write, u32 base, u32 size)
3687 struct pci_dev *dev = priv->pci_dev;
3689 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3690 if (!q->txb) {
3691 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3692 return -ENOMEM;
3695 q->bd =
3696 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3697 if (!q->bd) {
3698 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3699 sizeof(q->bd[0]) * count);
3700 kfree(q->txb);
3701 q->txb = NULL;
3702 return -ENOMEM;
3705 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3706 return 0;
3710 * Free one TFD, the one at index [txq->q.last_used].
3711 * Do NOT advance any indexes
3713 * @param dev
3714 * @param txq
3716 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3717 struct clx2_tx_queue *txq)
3719 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3720 struct pci_dev *dev = priv->pci_dev;
3721 int i;
3723 /* classify bd */
3724 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3725 /* nothing to cleanup after for host commands */
3726 return;
3728 /* sanity check */
3729 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3730 IPW_ERROR("Too many chunks: %i\n",
3731 le32_to_cpu(bd->u.data.num_chunks));
3732 /** @todo issue fatal error, it is quite a serious situation */
3733 return;
3736 /* unmap chunks if any */
3737 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3738 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3739 le16_to_cpu(bd->u.data.chunk_len[i]),
3740 PCI_DMA_TODEVICE);
3741 if (txq->txb[txq->q.last_used]) {
3742 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3743 txq->txb[txq->q.last_used] = NULL;
3749 * Deallocate DMA queue.
3751 * Empty queue by removing and destroying all BD's.
3752 * Free all buffers.
3754 * @param dev
3755 * @param q
3757 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3759 struct clx2_queue *q = &txq->q;
3760 struct pci_dev *dev = priv->pci_dev;
3762 if (q->n_bd == 0)
3763 return;
3765 /* first, empty all BD's */
3766 for (; q->first_empty != q->last_used;
3767 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3768 ipw_queue_tx_free_tfd(priv, txq);
3771 /* free buffers belonging to queue itself */
3772 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3773 q->dma_addr);
3774 kfree(txq->txb);
3776 /* 0 fill whole structure */
3777 memset(txq, 0, sizeof(*txq));
3781 * Destroy all DMA queues and structures
3783 * @param priv
3785 static void ipw_tx_queue_free(struct ipw_priv *priv)
3787 /* Tx CMD queue */
3788 ipw_queue_tx_free(priv, &priv->txq_cmd);
3790 /* Tx queues */
3791 ipw_queue_tx_free(priv, &priv->txq[0]);
3792 ipw_queue_tx_free(priv, &priv->txq[1]);
3793 ipw_queue_tx_free(priv, &priv->txq[2]);
3794 ipw_queue_tx_free(priv, &priv->txq[3]);
3797 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3799 /* First 3 bytes are manufacturer */
3800 bssid[0] = priv->mac_addr[0];
3801 bssid[1] = priv->mac_addr[1];
3802 bssid[2] = priv->mac_addr[2];
3804 /* Last bytes are random */
3805 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3807 bssid[0] &= 0xfe; /* clear multicast bit */
3808 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
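/*
 * As an example (using an arbitrary Intel-style MAC), a device address of
 * 00:0e:35:xx:xx:xx yields an ad-hoc BSSID of the form 02:0e:35:rr:rr:rr:
 * the manufacturer OUI is kept, the last three octets are random, and the
 * locally-administered bit keeps the result out of the globally assigned
 * address space.
 */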
3811 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3813 struct ipw_station_entry entry;
3814 int i;
3815 DECLARE_MAC_BUF(mac);
3817 for (i = 0; i < priv->num_stations; i++) {
3818 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3819 /* Another node is active in network */
3820 priv->missed_adhoc_beacons = 0;
3821 if (!(priv->config & CFG_STATIC_CHANNEL))
3822 /* when other nodes drop out, we drop out */
3823 priv->config &= ~CFG_ADHOC_PERSIST;
3825 return i;
3829 if (i == MAX_STATIONS)
3830 return IPW_INVALID_STATION;
3832 IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid));
3834 entry.reserved = 0;
3835 entry.support_mode = 0;
3836 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3837 memcpy(priv->stations[i], bssid, ETH_ALEN);
3838 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3839 &entry, sizeof(entry));
3840 priv->num_stations++;
3842 return i;
3845 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3847 int i;
3849 for (i = 0; i < priv->num_stations; i++)
3850 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3851 return i;
3853 return IPW_INVALID_STATION;
3856 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3858 int err;
3859 DECLARE_MAC_BUF(mac);
3861 if (priv->status & STATUS_ASSOCIATING) {
3862 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3863 queue_work(priv->workqueue, &priv->disassociate);
3864 return;
3867 if (!(priv->status & STATUS_ASSOCIATED)) {
3868 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3869 return;
3872 IPW_DEBUG_ASSOC("Disassocation attempt from %s "
3873 "on channel %d.\n",
3874 print_mac(mac, priv->assoc_request.bssid),
3875 priv->assoc_request.channel);
3877 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3878 priv->status |= STATUS_DISASSOCIATING;
3880 if (quiet)
3881 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3882 else
3883 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3885 err = ipw_send_associate(priv, &priv->assoc_request);
3886 if (err) {
3887 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3888 "failed.\n");
3889 return;
3894 static int ipw_disassociate(void *data)
3896 struct ipw_priv *priv = data;
3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3898 return 0;
3899 ipw_send_disassociate(data, 0);
3900 return 1;
3903 static void ipw_bg_disassociate(struct work_struct *work)
3905 struct ipw_priv *priv =
3906 container_of(work, struct ipw_priv, disassociate);
3907 mutex_lock(&priv->mutex);
3908 ipw_disassociate(priv);
3909 mutex_unlock(&priv->mutex);
3912 static void ipw_system_config(struct work_struct *work)
3914 struct ipw_priv *priv =
3915 container_of(work, struct ipw_priv, system_config);
3917 #ifdef CONFIG_IPW2200_PROMISCUOUS
3918 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3919 priv->sys_config.accept_all_data_frames = 1;
3920 priv->sys_config.accept_non_directed_frames = 1;
3921 priv->sys_config.accept_all_mgmt_bcpr = 1;
3922 priv->sys_config.accept_all_mgmt_frames = 1;
3924 #endif
3926 ipw_send_system_config(priv);
3929 struct ipw_status_code {
3930 u16 status;
3931 const char *reason;
3934 static const struct ipw_status_code ipw_status_codes[] = {
3935 {0x00, "Successful"},
3936 {0x01, "Unspecified failure"},
3937 {0x0A, "Cannot support all requested capabilities in the "
3938 "Capability information field"},
3939 {0x0B, "Reassociation denied due to inability to confirm that "
3940 "association exists"},
3941 {0x0C, "Association denied due to reason outside the scope of this "
3942 "standard"},
3943 {0x0D,
3944 "Responding station does not support the specified authentication "
3945 "algorithm"},
3946 {0x0E,
3947 "Received an Authentication frame with authentication sequence "
3948 "transaction sequence number out of expected sequence"},
3949 {0x0F, "Authentication rejected because of challenge failure"},
3950 {0x10, "Authentication rejected due to timeout waiting for next "
3951 "frame in sequence"},
3952 {0x11, "Association denied because AP is unable to handle additional "
3953 "associated stations"},
3954 {0x12,
3955 "Association denied due to requesting station not supporting all "
3956 "of the datarates in the BSSBasicServiceSet Parameter"},
3957 {0x13,
3958 "Association denied due to requesting station not supporting "
3959 "short preamble operation"},
3960 {0x14,
3961 "Association denied due to requesting station not supporting "
3962 "PBCC encoding"},
3963 {0x15,
3964 "Association denied due to requesting station not supporting "
3965 "channel agility"},
3966 {0x19,
3967 "Association denied due to requesting station not supporting "
3968 "short slot operation"},
3969 {0x1A,
3970 "Association denied due to requesting station not supporting "
3971 "DSSS-OFDM operation"},
3972 {0x28, "Invalid Information Element"},
3973 {0x29, "Group Cipher is not valid"},
3974 {0x2A, "Pairwise Cipher is not valid"},
3975 {0x2B, "AKMP is not valid"},
3976 {0x2C, "Unsupported RSN IE version"},
3977 {0x2D, "Invalid RSN IE Capabilities"},
3978 {0x2E, "Cipher suite is rejected per security policy"},
3981 static const char *ipw_get_status_code(u16 status)
3983 int i;
3984 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3985 if (ipw_status_codes[i].status == (status & 0xff))
3986 return ipw_status_codes[i].reason;
3987 return "Unknown status value.";
3990 static inline void average_init(struct average *avg)
3992 memset(avg, 0, sizeof(*avg));
3995 #define DEPTH_RSSI 8
3996 #define DEPTH_NOISE 16
3997 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3999 return ((depth-1)*prev_avg + val)/depth;
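/*
 * Worked example with DEPTH_RSSI = 8: with prev_avg = -60 and a new sample
 * of -52, the result is (7 * -60 + -52) / 8 = -59, i.e. each new reading
 * moves the running average by roughly 1/8 of the difference.
 */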
4002 static void average_add(struct average *avg, s16 val)
4004 avg->sum -= avg->entries[avg->pos];
4005 avg->sum += val;
4006 avg->entries[avg->pos++] = val;
4007 if (unlikely(avg->pos == AVG_ENTRIES)) {
4008 avg->init = 1;
4009 avg->pos = 0;
4013 static s16 average_value(struct average *avg)
4015 if (!unlikely(avg->init)) {
4016 if (avg->pos)
4017 return avg->sum / avg->pos;
4018 return 0;
4021 return avg->sum / AVG_ENTRIES;
4024 static void ipw_reset_stats(struct ipw_priv *priv)
4026 u32 len = sizeof(u32);
4028 priv->quality = 0;
4030 average_init(&priv->average_missed_beacons);
4031 priv->exp_avg_rssi = -60;
4032 priv->exp_avg_noise = -85 + 0x100;
4034 priv->last_rate = 0;
4035 priv->last_missed_beacons = 0;
4036 priv->last_rx_packets = 0;
4037 priv->last_tx_packets = 0;
4038 priv->last_tx_failures = 0;
4040 /* Firmware managed, reset only when NIC is restarted, so we have to
4041 * normalize on the current value */
4042 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4043 &priv->last_rx_err, &len);
4044 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4045 &priv->last_tx_failures, &len);
4047 /* Driver managed, reset with each association */
4048 priv->missed_adhoc_beacons = 0;
4049 priv->missed_beacons = 0;
4050 priv->tx_packets = 0;
4051 priv->rx_packets = 0;
4055 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4057 u32 i = 0x80000000;
4058 u32 mask = priv->rates_mask;
4059 /* If currently associated in B mode, restrict the maximum
4060 * rate match to B rates */
4061 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4062 mask &= IEEE80211_CCK_RATES_MASK;
4064 /* TODO: Verify that the rate is supported by the current rates
4065 * list. */
4067 while (i && !(mask & i))
4068 i >>= 1;
4069 switch (i) {
4070 case IEEE80211_CCK_RATE_1MB_MASK:
4071 return 1000000;
4072 case IEEE80211_CCK_RATE_2MB_MASK:
4073 return 2000000;
4074 case IEEE80211_CCK_RATE_5MB_MASK:
4075 return 5500000;
4076 case IEEE80211_OFDM_RATE_6MB_MASK:
4077 return 6000000;
4078 case IEEE80211_OFDM_RATE_9MB_MASK:
4079 return 9000000;
4080 case IEEE80211_CCK_RATE_11MB_MASK:
4081 return 11000000;
4082 case IEEE80211_OFDM_RATE_12MB_MASK:
4083 return 12000000;
4084 case IEEE80211_OFDM_RATE_18MB_MASK:
4085 return 18000000;
4086 case IEEE80211_OFDM_RATE_24MB_MASK:
4087 return 24000000;
4088 case IEEE80211_OFDM_RATE_36MB_MASK:
4089 return 36000000;
4090 case IEEE80211_OFDM_RATE_48MB_MASK:
4091 return 48000000;
4092 case IEEE80211_OFDM_RATE_54MB_MASK:
4093 return 54000000;
4096 if (priv->ieee->mode == IEEE_B)
4097 return 11000000;
4098 else
4099 return 54000000;
4102 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4104 u32 rate, len = sizeof(rate);
4105 int err;
4107 if (!(priv->status & STATUS_ASSOCIATED))
4108 return 0;
4110 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4111 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4112 &len);
4113 if (err) {
4114 IPW_DEBUG_INFO("failed querying ordinals.\n");
4115 return 0;
4117 } else
4118 return ipw_get_max_rate(priv);
4120 switch (rate) {
4121 case IPW_TX_RATE_1MB:
4122 return 1000000;
4123 case IPW_TX_RATE_2MB:
4124 return 2000000;
4125 case IPW_TX_RATE_5MB:
4126 return 5500000;
4127 case IPW_TX_RATE_6MB:
4128 return 6000000;
4129 case IPW_TX_RATE_9MB:
4130 return 9000000;
4131 case IPW_TX_RATE_11MB:
4132 return 11000000;
4133 case IPW_TX_RATE_12MB:
4134 return 12000000;
4135 case IPW_TX_RATE_18MB:
4136 return 18000000;
4137 case IPW_TX_RATE_24MB:
4138 return 24000000;
4139 case IPW_TX_RATE_36MB:
4140 return 36000000;
4141 case IPW_TX_RATE_48MB:
4142 return 48000000;
4143 case IPW_TX_RATE_54MB:
4144 return 54000000;
4147 return 0;
4150 #define IPW_STATS_INTERVAL (2 * HZ)
4151 static void ipw_gather_stats(struct ipw_priv *priv)
4153 u32 rx_err, rx_err_delta, rx_packets_delta;
4154 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4155 u32 missed_beacons_percent, missed_beacons_delta;
4156 u32 quality = 0;
4157 u32 len = sizeof(u32);
4158 s16 rssi;
4159 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4160 rate_quality;
4161 u32 max_rate;
4163 if (!(priv->status & STATUS_ASSOCIATED)) {
4164 priv->quality = 0;
4165 return;
4168 /* Update the statistics */
4169 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4170 &priv->missed_beacons, &len);
4171 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4172 priv->last_missed_beacons = priv->missed_beacons;
4173 if (priv->assoc_request.beacon_interval) {
4174 missed_beacons_percent = missed_beacons_delta *
4175 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4176 (IPW_STATS_INTERVAL * 10);
4177 } else {
4178 missed_beacons_percent = 0;
4180 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4182 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4183 rx_err_delta = rx_err - priv->last_rx_err;
4184 priv->last_rx_err = rx_err;
4186 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4187 tx_failures_delta = tx_failures - priv->last_tx_failures;
4188 priv->last_tx_failures = tx_failures;
4190 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4191 priv->last_rx_packets = priv->rx_packets;
4193 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4194 priv->last_tx_packets = priv->tx_packets;
4196 /* Calculate quality based on the following:
4198 * Missed beacon: 100% = 0, 0% = 70% missed
4199 * Rate: 60% = 1Mbs, 100% = Max
4200 * Rx and Tx errors represent a straight % of total Rx/Tx
4201 * RSSI: 100% = > -50, 0% = < -80
4202 * Rx errors: 100% = 0, 0% = 50% missed
4204 * The lowest computed quality is used.
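 *
 * Worked example of the beacon scaling below (hypothetical numbers):
 * with BEACON_THRESHOLD at 5 and 10% of beacons missed in the interval,
 *
 *	beacon_quality = 100 - 10 = 90
 *	beacon_quality = (90 - 5) * 100 / (100 - 5) = 89
 *
 * Values of beacon_quality below BEACON_THRESHOLD are clamped to zero
 * and the remaining range is stretched back out to 0-100%.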
4207 #define BEACON_THRESHOLD 5
4208 beacon_quality = 100 - missed_beacons_percent;
4209 if (beacon_quality < BEACON_THRESHOLD)
4210 beacon_quality = 0;
4211 else
4212 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4213 (100 - BEACON_THRESHOLD);
4214 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4215 beacon_quality, missed_beacons_percent);
4217 priv->last_rate = ipw_get_current_rate(priv);
4218 max_rate = ipw_get_max_rate(priv);
4219 rate_quality = priv->last_rate * 40 / max_rate + 60;
4220 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4221 rate_quality, priv->last_rate / 1000000);
4223 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4224 rx_quality = 100 - (rx_err_delta * 100) /
4225 (rx_packets_delta + rx_err_delta);
4226 else
4227 rx_quality = 100;
4228 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4229 rx_quality, rx_err_delta, rx_packets_delta);
4231 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4232 tx_quality = 100 - (tx_failures_delta * 100) /
4233 (tx_packets_delta + tx_failures_delta);
4234 else
4235 tx_quality = 100;
4236 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4237 tx_quality, tx_failures_delta, tx_packets_delta);
4239 rssi = priv->exp_avg_rssi;
4240 signal_quality =
4241 (100 *
4242 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4243 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4244 (priv->ieee->perfect_rssi - rssi) *
4245 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4246 62 * (priv->ieee->perfect_rssi - rssi))) /
4247 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4248 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4249 if (signal_quality > 100)
4250 signal_quality = 100;
4251 else if (signal_quality < 1)
4252 signal_quality = 0;
4254 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4255 signal_quality, rssi);
4257 quality = min(beacon_quality,
4258 min(rate_quality,
4259 min(tx_quality, min(rx_quality, signal_quality))));
4260 if (quality == beacon_quality)
4261 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4262 quality);
4263 if (quality == rate_quality)
4264 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4265 quality);
4266 if (quality == tx_quality)
4267 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4268 quality);
4269 if (quality == rx_quality)
4270 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4271 quality);
4272 if (quality == signal_quality)
4273 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4274 quality);
4276 priv->quality = quality;
4278 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4279 IPW_STATS_INTERVAL);
4282 static void ipw_bg_gather_stats(struct work_struct *work)
4284 struct ipw_priv *priv =
4285 container_of(work, struct ipw_priv, gather_stats.work);
4286 mutex_lock(&priv->mutex);
4287 ipw_gather_stats(priv);
4288 mutex_unlock(&priv->mutex);
4291 /* Missed beacon behavior:
4292 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4293 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4294 * Above disassociate threshold, give up and stop scanning.
4295 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
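/* Hypothetical walk-through of the thresholds above (example values,
 * not driver defaults): with roaming_threshold = 8 and
 * disassociate_threshold = 24, a missed_count of 5 only logs the event,
 * a missed_count of 12 sets STATUS_ROAMING and kicks off a scan
 * (provided the 'roaming' module parameter is enabled), and a
 * missed_count of 30 aborts any active scan and queues a disassociate
 * while associated. */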
4296 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4297 int missed_count)
4299 priv->notif_missed_beacons = missed_count;
4301 if (missed_count > priv->disassociate_threshold &&
4302 priv->status & STATUS_ASSOCIATED) {
4303 /* If associated and we've hit the missed
4304 * beacon threshold, disassociate, turn
4305 * off roaming, and abort any active scans */
4306 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4307 IPW_DL_STATE | IPW_DL_ASSOC,
4308 "Missed beacon: %d - disassociate\n", missed_count);
4309 priv->status &= ~STATUS_ROAMING;
4310 if (priv->status & STATUS_SCANNING) {
4311 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4312 IPW_DL_STATE,
4313 "Aborting scan with missed beacon.\n");
4314 queue_work(priv->workqueue, &priv->abort_scan);
4317 queue_work(priv->workqueue, &priv->disassociate);
4318 return;
4321 if (priv->status & STATUS_ROAMING) {
4322 /* If we are currently roaming, then just
4323 * print a debug statement... */
4324 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4325 "Missed beacon: %d - roam in progress\n",
4326 missed_count);
4327 return;
4330 if (roaming &&
4331 (missed_count > priv->roaming_threshold &&
4332 missed_count <= priv->disassociate_threshold)) {
4333 /* If we are not already roaming, set the ROAM
4334 * bit in the status and kick off a scan.
4335 * This can happen several times before we reach
4336 * disassociate_threshold. */
4337 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4338 "Missed beacon: %d - initiate "
4339 "roaming\n", missed_count);
4340 if (!(priv->status & STATUS_ROAMING)) {
4341 priv->status |= STATUS_ROAMING;
4342 if (!(priv->status & STATUS_SCANNING))
4343 queue_delayed_work(priv->workqueue,
4344 &priv->request_scan, 0);
4346 return;
4349 if (priv->status & STATUS_SCANNING) {
4350 /* Stop scan to keep fw from getting
4351 * stuck (only if we aren't roaming --
4352 * otherwise we'll never scan more than 2 or 3
4353 * channels..) */
4354 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4355 "Aborting scan with missed beacon.\n");
4356 queue_work(priv->workqueue, &priv->abort_scan);
4359 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4362 static void ipw_scan_event(struct work_struct *work)
4364 union iwreq_data wrqu;
4366 struct ipw_priv *priv =
4367 container_of(work, struct ipw_priv, scan_event.work);
4369 wrqu.data.length = 0;
4370 wrqu.data.flags = 0;
4371 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4374 static void handle_scan_event(struct ipw_priv *priv)
4376 /* Only userspace-requested scan completion events go out immediately */
4377 if (!priv->user_requested_scan) {
4378 if (!delayed_work_pending(&priv->scan_event))
4379 queue_delayed_work(priv->workqueue, &priv->scan_event,
4380 round_jiffies_relative(msecs_to_jiffies(4000)));
4381 } else {
4382 union iwreq_data wrqu;
4384 priv->user_requested_scan = 0;
4385 cancel_delayed_work(&priv->scan_event);
4387 wrqu.data.length = 0;
4388 wrqu.data.flags = 0;
4389 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4394 * Handle host notification packet.
4395 * Called from interrupt routine
4397 static void ipw_rx_notification(struct ipw_priv *priv,
4398 struct ipw_rx_notification *notif)
4400 DECLARE_MAC_BUF(mac);
4401 u16 size = le16_to_cpu(notif->size);
4402 notif->size = le16_to_cpu(notif->size);
4404 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4406 switch (notif->subtype) {
4407 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4408 struct notif_association *assoc = &notif->u.assoc;
4410 switch (assoc->state) {
4411 case CMAS_ASSOCIATED:{
4412 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 IPW_DL_ASSOC,
4414 "associated: '%s' %s"
4415 " \n",
4416 escape_essid(priv->essid,
4417 priv->essid_len),
4418 print_mac(mac, priv->bssid));
4420 switch (priv->ieee->iw_mode) {
4421 case IW_MODE_INFRA:
4422 memcpy(priv->ieee->bssid,
4423 priv->bssid, ETH_ALEN);
4424 break;
4426 case IW_MODE_ADHOC:
4427 memcpy(priv->ieee->bssid,
4428 priv->bssid, ETH_ALEN);
4430 /* clear out the station table */
4431 priv->num_stations = 0;
4433 IPW_DEBUG_ASSOC
4434 ("queueing adhoc check\n");
4435 queue_delayed_work(priv->
4436 workqueue,
4437 &priv->
4438 adhoc_check,
4439 le16_to_cpu(priv->
4440 assoc_request.
4441 beacon_interval));
4442 break;
4445 priv->status &= ~STATUS_ASSOCIATING;
4446 priv->status |= STATUS_ASSOCIATED;
4447 queue_work(priv->workqueue,
4448 &priv->system_config);
4450 #ifdef CONFIG_IPW2200_QOS
4451 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4452 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4453 if ((priv->status & STATUS_AUTH) &&
4454 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4455 == IEEE80211_STYPE_ASSOC_RESP)) {
4456 if ((sizeof
4457 (struct
4458 ieee80211_assoc_response)
4459 <= size)
4460 && (size <= 2314)) {
4461 struct
4462 ieee80211_rx_stats
4463 stats = {
4464 .len = size - 1,
4467 IPW_DEBUG_QOS
4468 ("QoS Associate "
4469 "size %d\n", size);
4470 ieee80211_rx_mgt(priv->
4471 ieee,
4472 (struct
4473 ieee80211_hdr_4addr
4475 &notif->u.raw, &stats);
4478 #endif
4480 schedule_work(&priv->link_up);
4482 break;
4485 case CMAS_AUTHENTICATED:{
4486 if (priv->
4487 status & (STATUS_ASSOCIATED |
4488 STATUS_AUTH)) {
4489 struct notif_authenticate *auth
4490 = &notif->u.auth;
4491 IPW_DEBUG(IPW_DL_NOTIF |
4492 IPW_DL_STATE |
4493 IPW_DL_ASSOC,
4494 "deauthenticated: '%s' "
4495 "%s"
4496 ": (0x%04X) - %s \n",
4497 escape_essid(priv->
4498 essid,
4499 priv->
4500 essid_len),
4501 print_mac(mac, priv->bssid),
4502 le16_to_cpu(auth->status),
4503 ipw_get_status_code
4504 (le16_to_cpu
4505 (auth->status)));
4507 priv->status &=
4508 ~(STATUS_ASSOCIATING |
4509 STATUS_AUTH |
4510 STATUS_ASSOCIATED);
4512 schedule_work(&priv->link_down);
4513 break;
4516 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4517 IPW_DL_ASSOC,
4518 "authenticated: '%s' %s"
4519 "\n",
4520 escape_essid(priv->essid,
4521 priv->essid_len),
4522 print_mac(mac, priv->bssid));
4523 break;
4526 case CMAS_INIT:{
4527 if (priv->status & STATUS_AUTH) {
4528 struct
4529 ieee80211_assoc_response
4530 *resp;
4531 resp =
4532 (struct
4533 ieee80211_assoc_response
4534 *)&notif->u.raw;
4535 IPW_DEBUG(IPW_DL_NOTIF |
4536 IPW_DL_STATE |
4537 IPW_DL_ASSOC,
4538 "association failed (0x%04X): %s\n",
4539 le16_to_cpu(resp->status),
4540 ipw_get_status_code
4541 (le16_to_cpu
4542 (resp->status)));
4545 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4546 IPW_DL_ASSOC,
4547 "disassociated: '%s' %s"
4548 " \n",
4549 escape_essid(priv->essid,
4550 priv->essid_len),
4551 print_mac(mac, priv->bssid));
4553 priv->status &=
4554 ~(STATUS_DISASSOCIATING |
4555 STATUS_ASSOCIATING |
4556 STATUS_ASSOCIATED | STATUS_AUTH);
4557 if (priv->assoc_network
4558 && (priv->assoc_network->
4559 capability &
4560 WLAN_CAPABILITY_IBSS))
4561 ipw_remove_current_network
4562 (priv);
4564 schedule_work(&priv->link_down);
4566 break;
4569 case CMAS_RX_ASSOC_RESP:
4570 break;
4572 default:
4573 IPW_ERROR("assoc: unknown (%d)\n",
4574 assoc->state);
4575 break;
4578 break;
4581 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4582 struct notif_authenticate *auth = &notif->u.auth;
4583 switch (auth->state) {
4584 case CMAS_AUTHENTICATED:
4585 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4586 "authenticated: '%s' %s \n",
4587 escape_essid(priv->essid,
4588 priv->essid_len),
4589 print_mac(mac, priv->bssid));
4590 priv->status |= STATUS_AUTH;
4591 break;
4593 case CMAS_INIT:
4594 if (priv->status & STATUS_AUTH) {
4595 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4596 IPW_DL_ASSOC,
4597 "authentication failed (0x%04X): %s\n",
4598 le16_to_cpu(auth->status),
4599 ipw_get_status_code(le16_to_cpu
4600 (auth->
4601 status)));
4603 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4604 IPW_DL_ASSOC,
4605 "deauthenticated: '%s' %s\n",
4606 escape_essid(priv->essid,
4607 priv->essid_len),
4608 print_mac(mac, priv->bssid));
4610 priv->status &= ~(STATUS_ASSOCIATING |
4611 STATUS_AUTH |
4612 STATUS_ASSOCIATED);
4614 schedule_work(&priv->link_down);
4615 break;
4617 case CMAS_TX_AUTH_SEQ_1:
4618 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4619 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4620 break;
4621 case CMAS_RX_AUTH_SEQ_2:
4622 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4623 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4624 break;
4625 case CMAS_AUTH_SEQ_1_PASS:
4626 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4627 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4628 break;
4629 case CMAS_AUTH_SEQ_1_FAIL:
4630 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4631 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4632 break;
4633 case CMAS_TX_AUTH_SEQ_3:
4634 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4635 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4636 break;
4637 case CMAS_RX_AUTH_SEQ_4:
4638 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4639 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4640 break;
4641 case CMAS_AUTH_SEQ_2_PASS:
4642 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4643 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4644 break;
4645 case CMAS_AUTH_SEQ_2_FAIL:
4646 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4647 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4648 break;
4649 case CMAS_TX_ASSOC:
4650 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4651 IPW_DL_ASSOC, "TX_ASSOC\n");
4652 break;
4653 case CMAS_RX_ASSOC_RESP:
4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4655 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4657 break;
4658 case CMAS_ASSOCIATED:
4659 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4660 IPW_DL_ASSOC, "ASSOCIATED\n");
4661 break;
4662 default:
4663 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4664 auth->state);
4665 break;
4667 break;
4670 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4671 struct notif_channel_result *x =
4672 &notif->u.channel_result;
4674 if (size == sizeof(*x)) {
4675 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4676 x->channel_num);
4677 } else {
4678 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4679 "(should be %zd)\n",
4680 size, sizeof(*x));
4682 break;
4685 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4686 struct notif_scan_complete *x = &notif->u.scan_complete;
4687 if (size == sizeof(*x)) {
4688 IPW_DEBUG_SCAN
4689 ("Scan completed: type %d, %d channels, "
4690 "%d status\n", x->scan_type,
4691 x->num_channels, x->status);
4692 } else {
4693 IPW_ERROR("Scan completed of wrong size %d "
4694 "(should be %zd)\n",
4695 size, sizeof(*x));
4698 priv->status &=
4699 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4701 wake_up_interruptible(&priv->wait_state);
4702 cancel_delayed_work(&priv->scan_check);
4704 if (priv->status & STATUS_EXIT_PENDING)
4705 break;
4707 priv->ieee->scans++;
4709 #ifdef CONFIG_IPW2200_MONITOR
4710 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4711 priv->status |= STATUS_SCAN_FORCED;
4712 queue_delayed_work(priv->workqueue,
4713 &priv->request_scan, 0);
4714 break;
4716 priv->status &= ~STATUS_SCAN_FORCED;
4717 #endif /* CONFIG_IPW2200_MONITOR */
4719 /* Do queued direct scans first */
4720 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4721 queue_delayed_work(priv->workqueue,
4722 &priv->request_direct_scan, 0);
4725 if (!(priv->status & (STATUS_ASSOCIATED |
4726 STATUS_ASSOCIATING |
4727 STATUS_ROAMING |
4728 STATUS_DISASSOCIATING)))
4729 queue_work(priv->workqueue, &priv->associate);
4730 else if (priv->status & STATUS_ROAMING) {
4731 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4732 /* If a scan completed and we are in roam mode, then
4733 * the scan that completed was the one requested as a
4734 * result of entering roam... so, schedule the
4735 * roam work */
4736 queue_work(priv->workqueue,
4737 &priv->roam);
4738 else
4739 /* Don't schedule if we aborted the scan */
4740 priv->status &= ~STATUS_ROAMING;
4741 } else if (priv->status & STATUS_SCAN_PENDING)
4742 queue_delayed_work(priv->workqueue,
4743 &priv->request_scan, 0);
4744 else if (priv->config & CFG_BACKGROUND_SCAN
4745 && priv->status & STATUS_ASSOCIATED)
4746 queue_delayed_work(priv->workqueue,
4747 &priv->request_scan,
4748 round_jiffies_relative(HZ));
4750 /* Send an empty event to user space.
4751 * We don't send the received data on the event because
4752 * it would require us to do complex transcoding, and
4753 * we want to minimise the work done in the irq handler.
4754 * Use a request to extract the data.
4755 * Also, we generate this event for any scan, regardless
4756 * of how the scan was initiated. User space can just
4757 * sync on periodic scan to get fresh data...
4758 * Jean II */
4759 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4760 handle_scan_event(priv);
4761 break;
4764 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4765 struct notif_frag_length *x = &notif->u.frag_len;
4767 if (size == sizeof(*x))
4768 IPW_ERROR("Frag length: %d\n",
4769 le16_to_cpu(x->frag_length));
4770 else
4771 IPW_ERROR("Frag length of wrong size %d "
4772 "(should be %zd)\n",
4773 size, sizeof(*x));
4774 break;
4777 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4778 struct notif_link_deterioration *x =
4779 &notif->u.link_deterioration;
4781 if (size == sizeof(*x)) {
4782 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4783 "link deterioration: type %d, cnt %d\n",
4784 x->silence_notification_type,
4785 x->silence_count);
4786 memcpy(&priv->last_link_deterioration, x,
4787 sizeof(*x));
4788 } else {
4789 IPW_ERROR("Link Deterioration of wrong size %d "
4790 "(should be %zd)\n",
4791 size, sizeof(*x));
4793 break;
4796 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4797 IPW_ERROR("Dino config\n");
4798 if (priv->hcmd
4799 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4800 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4802 break;
4805 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4806 struct notif_beacon_state *x = &notif->u.beacon_state;
4807 if (size != sizeof(*x)) {
4808 IPW_ERROR
4809 ("Beacon state of wrong size %d (should "
4810 "be %zd)\n", size, sizeof(*x));
4811 break;
4814 if (le32_to_cpu(x->state) ==
4815 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4816 ipw_handle_missed_beacon(priv,
4817 le32_to_cpu(x->
4818 number));
4820 break;
4823 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4824 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4825 if (size == sizeof(*x)) {
4826 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4827 "0x%02x station %d\n",
4828 x->key_state, x->security_type,
4829 x->station_index);
4830 break;
4833 IPW_ERROR
4834 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4835 size, sizeof(*x));
4836 break;
4839 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4840 struct notif_calibration *x = &notif->u.calibration;
4842 if (size == sizeof(*x)) {
4843 memcpy(&priv->calib, x, sizeof(*x));
4844 IPW_DEBUG_INFO("TODO: Calibration\n");
4845 break;
4848 IPW_ERROR
4849 ("Calibration of wrong size %d (should be %zd)\n",
4850 size, sizeof(*x));
4851 break;
4854 case HOST_NOTIFICATION_NOISE_STATS:{
4855 if (size == sizeof(u32)) {
4856 priv->exp_avg_noise =
4857 exponential_average(priv->exp_avg_noise,
4858 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4859 DEPTH_NOISE);
4860 break;
4863 IPW_ERROR
4864 ("Noise stat is wrong size %d (should be %zd)\n",
4865 size, sizeof(u32));
4866 break;
4869 default:
4870 IPW_DEBUG_NOTIF("Unknown notification: "
4871 "subtype=%d,flags=0x%2x,size=%d\n",
4872 notif->subtype, notif->flags, size);
4877 * Destroys all DMA structures and initialises them again
4879 * @param priv
4880 * @return error code
4882 static int ipw_queue_reset(struct ipw_priv *priv)
4884 int rc = 0;
4885 /** @todo customize queue sizes */
4886 int nTx = 64, nTxCmd = 8;
4887 ipw_tx_queue_free(priv);
4888 /* Tx CMD queue */
4889 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4890 IPW_TX_CMD_QUEUE_READ_INDEX,
4891 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4892 IPW_TX_CMD_QUEUE_BD_BASE,
4893 IPW_TX_CMD_QUEUE_BD_SIZE);
4894 if (rc) {
4895 IPW_ERROR("Tx Cmd queue init failed\n");
4896 goto error;
4898 /* Tx queue(s) */
4899 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4900 IPW_TX_QUEUE_0_READ_INDEX,
4901 IPW_TX_QUEUE_0_WRITE_INDEX,
4902 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4903 if (rc) {
4904 IPW_ERROR("Tx 0 queue init failed\n");
4905 goto error;
4907 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4908 IPW_TX_QUEUE_1_READ_INDEX,
4909 IPW_TX_QUEUE_1_WRITE_INDEX,
4910 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4911 if (rc) {
4912 IPW_ERROR("Tx 1 queue init failed\n");
4913 goto error;
4915 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4916 IPW_TX_QUEUE_2_READ_INDEX,
4917 IPW_TX_QUEUE_2_WRITE_INDEX,
4918 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4919 if (rc) {
4920 IPW_ERROR("Tx 2 queue init failed\n");
4921 goto error;
4923 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4924 IPW_TX_QUEUE_3_READ_INDEX,
4925 IPW_TX_QUEUE_3_WRITE_INDEX,
4926 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4927 if (rc) {
4928 IPW_ERROR("Tx 3 queue init failed\n");
4929 goto error;
4931 /* statistics */
4932 priv->rx_bufs_min = 0;
4933 priv->rx_pend_max = 0;
4934 return rc;
4936 error:
4937 ipw_tx_queue_free(priv);
4938 return rc;
4942 * Reclaim Tx queue entries no longer used by the NIC.
4944 * When FW advances the 'R' index, all entries between the old and
4945 * new 'R' index need to be reclaimed. As a result, some free space
4946 * becomes available. If there is enough free space (> low mark), wake the Tx queue.
4948 * @note Need to protect against garbage in the 'R' index
4949 * @param priv
4950 * @param txq
4951 * @param qindex
4952 * @return Number of used entries remaining in the queue
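 *
 * Worked example of the wrap-around "used" computation at the end of
 * this function (hypothetical indexes): with n_bd = 64, first_empty = 3
 * and last_used = 60,
 *
 *	used = 3 - 60 = -57;  used += 64  =>  used = 7
 *
 * i.e. seven descriptors are still outstanding after the reclaim pass.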
4954 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4955 struct clx2_tx_queue *txq, int qindex)
4957 u32 hw_tail;
4958 int used;
4959 struct clx2_queue *q = &txq->q;
4961 hw_tail = ipw_read32(priv, q->reg_r);
4962 if (hw_tail >= q->n_bd) {
4963 IPW_ERROR
4964 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4965 hw_tail, q->n_bd);
4966 goto done;
4968 for (; q->last_used != hw_tail;
4969 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4970 ipw_queue_tx_free_tfd(priv, txq);
4971 priv->tx_packets++;
4973 done:
4974 if ((ipw_tx_queue_space(q) > q->low_mark) &&
4975 (qindex >= 0) &&
4976 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4977 netif_wake_queue(priv->net_dev);
4978 used = q->first_empty - q->last_used;
4979 if (used < 0)
4980 used += q->n_bd;
4982 return used;
4985 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4986 int len, int sync)
4988 struct clx2_tx_queue *txq = &priv->txq_cmd;
4989 struct clx2_queue *q = &txq->q;
4990 struct tfd_frame *tfd;
4992 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
4993 IPW_ERROR("No space for Tx\n");
4994 return -EBUSY;
4997 tfd = &txq->bd[q->first_empty];
4998 txq->txb[q->first_empty] = NULL;
5000 memset(tfd, 0, sizeof(*tfd));
5001 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5002 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5003 priv->hcmd_seq++;
5004 tfd->u.cmd.index = hcmd;
5005 tfd->u.cmd.length = len;
5006 memcpy(tfd->u.cmd.payload, buf, len);
5007 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5008 ipw_write32(priv, q->reg_w, q->first_empty);
5009 _ipw_read32(priv, 0x90);
5011 return 0;
5015 * Rx theory of operation
5017 * The host allocates 32 DMA target addresses and passes the host address
5018 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5019 * 0 to 31
5021 * Rx Queue Indexes
5022 * The host/firmware share two index registers for managing the Rx buffers.
5024 * The READ index maps to the first position that the firmware may be writing
5025 * to -- the driver can read up to (but not including) this position and get
5026 * good data.
5027 * The READ index is managed by the firmware once the card is enabled.
5029 * The WRITE index maps to the last position the driver has read from -- the
5030 * position preceding WRITE is the last slot the firmware can place a packet.
5032 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5033 * WRITE = READ.
5035 * During initialization the host sets up the READ queue position to the first
5036 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5038 * When the firmware places a packet in a buffer it will advance the READ index
5039 * and fire the RX interrupt. The driver can then query the READ index and
5040 * process as many packets as possible, moving the WRITE index forward as it
5041 * resets the Rx queue buffers with new memory.
5043 * The management in the driver is as follows:
5044 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5045 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5046 * to replenish the ipw->rxq->rx_free.
5047 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5048 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5049 * 'processed' and 'read' driver indexes as well)
5050 * + A received packet is processed and handed to the kernel network stack,
5051 * detached from the ipw->rxq. The driver 'processed' index is updated.
5052 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5053 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5054 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5055 * were enough free buffers and RX_STALLED is set it is cleared.
5058 * Driver sequence:
5060 * ipw_rx_queue_alloc() Allocates rx_free
5061 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5062 * ipw_rx_queue_restock
5063 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5064 * queue, updates firmware pointers, and updates
5065 * the WRITE index. If insufficient rx_free buffers
5066 * are available, schedules ipw_rx_queue_replenish
5068 * -- enable interrupts --
5069 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5070 * READ INDEX, detaching the SKB from the pool.
5071 * Moves the packet buffer from queue to rx_used.
5072 * Calls ipw_rx_queue_restock to refill any empty
5073 * slots.
5074 * ...
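 *
 * A minimal sketch of the free-slot arithmetic implied above, assuming
 * a ring of RX_QUEUE_SIZE entries (illustrative only; the driver's own
 * ipw_rx_queue_space() helper, used by ipw_rx_queue_restock() below,
 * applies its own safety margin):
 *
 *	space = read - write;
 *	if (space <= 0)
 *		space += RX_QUEUE_SIZE;
 *	space -= 1;
 *
 * where the final decrement keeps at least one slot free so a full
 * ring is never mistaken for an empty one.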
5079 * If there are slots in the RX queue that need to be restocked,
5080 * and we have free pre-allocated buffers, fill the ranks as much
5081 * as we can pulling from rx_free.
5083 * This moves the 'write' index forward to catch up with 'processed', and
5084 * also updates the memory address in the firmware to reference the new
5085 * target buffer.
5087 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5089 struct ipw_rx_queue *rxq = priv->rxq;
5090 struct list_head *element;
5091 struct ipw_rx_mem_buffer *rxb;
5092 unsigned long flags;
5093 int write;
5095 spin_lock_irqsave(&rxq->lock, flags);
5096 write = rxq->write;
5097 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5098 element = rxq->rx_free.next;
5099 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5100 list_del(element);
5102 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5103 rxb->dma_addr);
5104 rxq->queue[rxq->write] = rxb;
5105 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5106 rxq->free_count--;
5108 spin_unlock_irqrestore(&rxq->lock, flags);
5110 /* If the pre-allocated buffer pool is dropping low, schedule to
5111 * refill it */
5112 if (rxq->free_count <= RX_LOW_WATERMARK)
5113 queue_work(priv->workqueue, &priv->rx_replenish);
5115 /* If we've added more space for the firmware to place data, tell it */
5116 if (write != rxq->write)
5117 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5121 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5122 * Also restock the Rx queue via ipw_rx_queue_restock.
5124 * This is called as a scheduled work item (except during initialization)
5126 static void ipw_rx_queue_replenish(void *data)
5128 struct ipw_priv *priv = data;
5129 struct ipw_rx_queue *rxq = priv->rxq;
5130 struct list_head *element;
5131 struct ipw_rx_mem_buffer *rxb;
5132 unsigned long flags;
5134 spin_lock_irqsave(&rxq->lock, flags);
5135 while (!list_empty(&rxq->rx_used)) {
5136 element = rxq->rx_used.next;
5137 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5138 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5139 if (!rxb->skb) {
5140 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5141 priv->net_dev->name);
5142 /* We don't reschedule replenish work here -- we will
5143 * call the restock method and if it still needs
5144 * more buffers it will schedule replenish */
5145 break;
5147 list_del(element);
5149 rxb->dma_addr =
5150 pci_map_single(priv->pci_dev, rxb->skb->data,
5151 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5153 list_add_tail(&rxb->list, &rxq->rx_free);
5154 rxq->free_count++;
5156 spin_unlock_irqrestore(&rxq->lock, flags);
5158 ipw_rx_queue_restock(priv);
5161 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5163 struct ipw_priv *priv =
5164 container_of(work, struct ipw_priv, rx_replenish);
5165 mutex_lock(&priv->mutex);
5166 ipw_rx_queue_replenish(priv);
5167 mutex_unlock(&priv->mutex);
5170 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5171 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5172 * This free routine walks the list of POOL entries and, if the SKB is
5173 * non-NULL, unmaps and frees it.
5175 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5177 int i;
5179 if (!rxq)
5180 return;
5182 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5183 if (rxq->pool[i].skb != NULL) {
5184 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5185 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5186 dev_kfree_skb(rxq->pool[i].skb);
5190 kfree(rxq);
5193 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5195 struct ipw_rx_queue *rxq;
5196 int i;
5198 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5199 if (unlikely(!rxq)) {
5200 IPW_ERROR("memory allocation failed\n");
5201 return NULL;
5203 spin_lock_init(&rxq->lock);
5204 INIT_LIST_HEAD(&rxq->rx_free);
5205 INIT_LIST_HEAD(&rxq->rx_used);
5207 /* Fill the rx_used queue with _all_ of the Rx buffers */
5208 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5209 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5211 /* Set us so that we have processed and used all buffers, but have
5212 * not restocked the Rx queue with fresh buffers */
5213 rxq->read = rxq->write = 0;
5214 rxq->free_count = 0;
5216 return rxq;
5219 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5221 rate &= ~IEEE80211_BASIC_RATE_MASK;
5222 if (ieee_mode == IEEE_A) {
5223 switch (rate) {
5224 case IEEE80211_OFDM_RATE_6MB:
5225 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5226 1 : 0;
5227 case IEEE80211_OFDM_RATE_9MB:
5228 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5229 1 : 0;
5230 case IEEE80211_OFDM_RATE_12MB:
5231 return priv->
5232 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5233 case IEEE80211_OFDM_RATE_18MB:
5234 return priv->
5235 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5236 case IEEE80211_OFDM_RATE_24MB:
5237 return priv->
5238 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5239 case IEEE80211_OFDM_RATE_36MB:
5240 return priv->
5241 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5242 case IEEE80211_OFDM_RATE_48MB:
5243 return priv->
5244 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5245 case IEEE80211_OFDM_RATE_54MB:
5246 return priv->
5247 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5248 default:
5249 return 0;
5253 /* B and G mixed */
5254 switch (rate) {
5255 case IEEE80211_CCK_RATE_1MB:
5256 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5257 case IEEE80211_CCK_RATE_2MB:
5258 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5259 case IEEE80211_CCK_RATE_5MB:
5260 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5261 case IEEE80211_CCK_RATE_11MB:
5262 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5265 /* If we are limited to B modulations, bail at this point */
5266 if (ieee_mode == IEEE_B)
5267 return 0;
5269 /* G */
5270 switch (rate) {
5271 case IEEE80211_OFDM_RATE_6MB:
5272 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5273 case IEEE80211_OFDM_RATE_9MB:
5274 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5275 case IEEE80211_OFDM_RATE_12MB:
5276 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5277 case IEEE80211_OFDM_RATE_18MB:
5278 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5279 case IEEE80211_OFDM_RATE_24MB:
5280 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5281 case IEEE80211_OFDM_RATE_36MB:
5282 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5283 case IEEE80211_OFDM_RATE_48MB:
5284 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5285 case IEEE80211_OFDM_RATE_54MB:
5286 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5289 return 0;
5292 static int ipw_compatible_rates(struct ipw_priv *priv,
5293 const struct ieee80211_network *network,
5294 struct ipw_supported_rates *rates)
5296 int num_rates, i;
5298 memset(rates, 0, sizeof(*rates));
5299 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5300 rates->num_rates = 0;
5301 for (i = 0; i < num_rates; i++) {
5302 if (!ipw_is_rate_in_mask(priv, network->mode,
5303 network->rates[i])) {
5305 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5306 IPW_DEBUG_SCAN("Adding masked mandatory "
5307 "rate %02X\n",
5308 network->rates[i]);
5309 rates->supported_rates[rates->num_rates++] =
5310 network->rates[i];
5311 continue;
5314 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5315 network->rates[i], priv->rates_mask);
5316 continue;
5319 rates->supported_rates[rates->num_rates++] = network->rates[i];
5322 num_rates = min(network->rates_ex_len,
5323 (u8) (IPW_MAX_RATES - num_rates));
5324 for (i = 0; i < num_rates; i++) {
5325 if (!ipw_is_rate_in_mask(priv, network->mode,
5326 network->rates_ex[i])) {
5327 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5328 IPW_DEBUG_SCAN("Adding masked mandatory "
5329 "rate %02X\n",
5330 network->rates_ex[i]);
5331 rates->supported_rates[rates->num_rates++] =
5332 network->rates_ex[i];
5333 continue;
5336 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5337 network->rates_ex[i], priv->rates_mask);
5338 continue;
5341 rates->supported_rates[rates->num_rates++] =
5342 network->rates_ex[i];
5345 return 1;
5348 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5349 const struct ipw_supported_rates *src)
5351 u8 i;
5352 for (i = 0; i < src->num_rates; i++)
5353 dest->supported_rates[i] = src->supported_rates[i];
5354 dest->num_rates = src->num_rates;
5357 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5358 * mask should ever be used -- right now all callers that add the scan rates
5359 * use modulation = CCK, so BASIC_RATE_MASK is never set... */
5360 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5361 u8 modulation, u32 rate_mask)
5363 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5364 IEEE80211_BASIC_RATE_MASK : 0;
5366 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5367 rates->supported_rates[rates->num_rates++] =
5368 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5370 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5371 rates->supported_rates[rates->num_rates++] =
5372 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5374 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5375 rates->supported_rates[rates->num_rates++] = basic_mask |
5376 IEEE80211_CCK_RATE_5MB;
5378 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5379 rates->supported_rates[rates->num_rates++] = basic_mask |
5380 IEEE80211_CCK_RATE_11MB;
5383 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5384 u8 modulation, u32 rate_mask)
5386 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5387 IEEE80211_BASIC_RATE_MASK : 0;
5389 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5390 rates->supported_rates[rates->num_rates++] = basic_mask |
5391 IEEE80211_OFDM_RATE_6MB;
5393 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5394 rates->supported_rates[rates->num_rates++] =
5395 IEEE80211_OFDM_RATE_9MB;
5397 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5398 rates->supported_rates[rates->num_rates++] = basic_mask |
5399 IEEE80211_OFDM_RATE_12MB;
5401 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5402 rates->supported_rates[rates->num_rates++] =
5403 IEEE80211_OFDM_RATE_18MB;
5405 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5406 rates->supported_rates[rates->num_rates++] = basic_mask |
5407 IEEE80211_OFDM_RATE_24MB;
5409 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5410 rates->supported_rates[rates->num_rates++] =
5411 IEEE80211_OFDM_RATE_36MB;
5413 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5414 rates->supported_rates[rates->num_rates++] =
5415 IEEE80211_OFDM_RATE_48MB;
5417 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5418 rates->supported_rates[rates->num_rates++] =
5419 IEEE80211_OFDM_RATE_54MB;
5422 struct ipw_network_match {
5423 struct ieee80211_network *network;
5424 struct ipw_supported_rates rates;
5427 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5428 struct ipw_network_match *match,
5429 struct ieee80211_network *network,
5430 int roaming)
5432 struct ipw_supported_rates rates;
5433 DECLARE_MAC_BUF(mac);
5434 DECLARE_MAC_BUF(mac2);
5436 /* Verify that this network's capability is compatible with the
5437 * current mode (AdHoc or Infrastructure) */
5438 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5439 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5440 IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to "
5441 "capability mismatch.\n",
5442 escape_essid(network->ssid, network->ssid_len),
5443 print_mac(mac, network->bssid));
5444 return 0;
5447 /* If we do not have an ESSID for this AP, we can not associate with
5448 * it */
5449 if (network->flags & NETWORK_EMPTY_ESSID) {
5450 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5451 "because of hidden ESSID.\n",
5452 escape_essid(network->ssid, network->ssid_len),
5453 print_mac(mac, network->bssid));
5454 return 0;
5457 if (unlikely(roaming)) {
5458 /* If we are roaming, then check that this is a valid
5459 * network to try and roam to */
5460 if ((network->ssid_len != match->network->ssid_len) ||
5461 memcmp(network->ssid, match->network->ssid,
5462 network->ssid_len)) {
5463 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5464 "because of non-network ESSID.\n",
5465 escape_essid(network->ssid,
5466 network->ssid_len),
5467 print_mac(mac, network->bssid));
5468 return 0;
5470 } else {
5471 /* If an ESSID has been configured then compare the broadcast
5472 * ESSID to ours */
5473 if ((priv->config & CFG_STATIC_ESSID) &&
5474 ((network->ssid_len != priv->essid_len) ||
5475 memcmp(network->ssid, priv->essid,
5476 min(network->ssid_len, priv->essid_len)))) {
5477 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5479 strncpy(escaped,
5480 escape_essid(network->ssid, network->ssid_len),
5481 sizeof(escaped));
5482 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5483 "because of ESSID mismatch: '%s'.\n",
5484 escaped, print_mac(mac, network->bssid),
5485 escape_essid(priv->essid,
5486 priv->essid_len));
5487 return 0;
5491 /* If this network's TSF timestamp is older than the current match's,
5492 * don't bother testing everything else. */
5494 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5495 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5496 "current network.\n",
5497 escape_essid(match->network->ssid,
5498 match->network->ssid_len));
5499 return 0;
5500 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5501 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5502 "current network.\n",
5503 escape_essid(match->network->ssid,
5504 match->network->ssid_len));
5505 return 0;
5508 /* Now go through and see if the requested network is valid... */
5509 if (priv->ieee->scan_age != 0 &&
5510 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5511 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5512 "because of age: %ums.\n",
5513 escape_essid(network->ssid, network->ssid_len),
5514 print_mac(mac, network->bssid),
5515 jiffies_to_msecs(jiffies -
5516 network->last_scanned));
5517 return 0;
5520 if ((priv->config & CFG_STATIC_CHANNEL) &&
5521 (network->channel != priv->channel)) {
5522 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5523 "because of channel mismatch: %d != %d.\n",
5524 escape_essid(network->ssid, network->ssid_len),
5525 print_mac(mac, network->bssid),
5526 network->channel, priv->channel);
5527 return 0;
5530 /* Verify privacy compatibility */
5531 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5532 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5533 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5534 "because of privacy mismatch: %s != %s.\n",
5535 escape_essid(network->ssid, network->ssid_len),
5536 print_mac(mac, network->bssid),
5537 priv->
5538 capability & CAP_PRIVACY_ON ? "on" : "off",
5539 network->
5540 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5541 "off");
5542 return 0;
5545 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5546 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5547 "because of the same BSSID match: %s"
5548 ".\n", escape_essid(network->ssid,
5549 network->ssid_len),
5550 print_mac(mac, network->bssid),
5551 print_mac(mac2, priv->bssid));
5552 return 0;
5555 /* Filter out any incompatible freq / mode combinations */
5556 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5557 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5558 "because of invalid frequency/mode "
5559 "combination.\n",
5560 escape_essid(network->ssid, network->ssid_len),
5561 print_mac(mac, network->bssid));
5562 return 0;
5565 /* Ensure that the rates supported by the driver are compatible with
5566 * this AP, including verification of basic rates (mandatory) */
5567 if (!ipw_compatible_rates(priv, network, &rates)) {
5568 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5569 "because configured rate mask excludes "
5570 "AP mandatory rate.\n",
5571 escape_essid(network->ssid, network->ssid_len),
5572 print_mac(mac, network->bssid));
5573 return 0;
5576 if (rates.num_rates == 0) {
5577 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5578 "because of no compatible rates.\n",
5579 escape_essid(network->ssid, network->ssid_len),
5580 print_mac(mac, network->bssid));
5581 return 0;
5584 /* TODO: Perform any further minimal comparative tests. We do not
5585 * want to put too much policy logic here; intelligent scan selection
5586 * should occur within a generic IEEE 802.11 user space tool. */
5588 /* Set up 'new' AP to this network */
5589 ipw_copy_rates(&match->rates, &rates);
5590 match->network = network;
5591 IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n",
5592 escape_essid(network->ssid, network->ssid_len),
5593 print_mac(mac, network->bssid));
5595 return 1;
5598 static void ipw_merge_adhoc_network(struct work_struct *work)
5600 struct ipw_priv *priv =
5601 container_of(work, struct ipw_priv, merge_networks);
5602 struct ieee80211_network *network = NULL;
5603 struct ipw_network_match match = {
5604 .network = priv->assoc_network
5607 if ((priv->status & STATUS_ASSOCIATED) &&
5608 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5609 /* First pass through ROAM process -- look for a better
5610 * network */
5611 unsigned long flags;
5613 spin_lock_irqsave(&priv->ieee->lock, flags);
5614 list_for_each_entry(network, &priv->ieee->network_list, list) {
5615 if (network != priv->assoc_network)
5616 ipw_find_adhoc_network(priv, &match, network,
5619 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5621 if (match.network == priv->assoc_network) {
5622 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5623 "merge to.\n");
5624 return;
5627 mutex_lock(&priv->mutex);
5628 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5629 IPW_DEBUG_MERGE("remove network %s\n",
5630 escape_essid(priv->essid,
5631 priv->essid_len));
5632 ipw_remove_current_network(priv);
5635 ipw_disassociate(priv);
5636 priv->assoc_network = match.network;
5637 mutex_unlock(&priv->mutex);
5638 return;
5642 static int ipw_best_network(struct ipw_priv *priv,
5643 struct ipw_network_match *match,
5644 struct ieee80211_network *network, int roaming)
5646 struct ipw_supported_rates rates;
5647 DECLARE_MAC_BUF(mac);
5649 /* Verify that this network's capability is compatible with the
5650 * current mode (AdHoc or Infrastructure) */
5651 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5652 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5653 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5654 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5655 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to "
5656 "capability mismatch.\n",
5657 escape_essid(network->ssid, network->ssid_len),
5658 print_mac(mac, network->bssid));
5659 return 0;
5662 /* If we do not have an ESSID for this AP, we can not associate with
5663 * it */
5664 if (network->flags & NETWORK_EMPTY_ESSID) {
5665 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5666 "because of hidden ESSID.\n",
5667 escape_essid(network->ssid, network->ssid_len),
5668 print_mac(mac, network->bssid));
5669 return 0;
5672 if (unlikely(roaming)) {
5673 /* If we are roaming, then check that this is a valid
5674 * network to try and roam to */
5675 if ((network->ssid_len != match->network->ssid_len) ||
5676 memcmp(network->ssid, match->network->ssid,
5677 network->ssid_len)) {
5678 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5679 "because of non-network ESSID.\n",
5680 escape_essid(network->ssid,
5681 network->ssid_len),
5682 print_mac(mac, network->bssid));
5683 return 0;
5685 } else {
5686 /* If an ESSID has been configured then compare the broadcast
5687 * ESSID to ours */
5688 if ((priv->config & CFG_STATIC_ESSID) &&
5689 ((network->ssid_len != priv->essid_len) ||
5690 memcmp(network->ssid, priv->essid,
5691 min(network->ssid_len, priv->essid_len)))) {
5692 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5693 strncpy(escaped,
5694 escape_essid(network->ssid, network->ssid_len),
5695 sizeof(escaped));
5696 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5697 "because of ESSID mismatch: '%s'.\n",
5698 escaped, print_mac(mac, network->bssid),
5699 escape_essid(priv->essid,
5700 priv->essid_len));
5701 return 0;
5705 /* If the current match's signal is stronger than this network's,
5706 * don't bother testing everything else. */
5707 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5708 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5709 strncpy(escaped,
5710 escape_essid(network->ssid, network->ssid_len),
5711 sizeof(escaped));
5712 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because "
5713 "'%s (%s)' has a stronger signal.\n",
5714 escaped, print_mac(mac, network->bssid),
5715 escape_essid(match->network->ssid,
5716 match->network->ssid_len),
5717 print_mac(mac, match->network->bssid));
5718 return 0;
5721 /* If this network has already had an association attempt within the
5722 * last 3 seconds, do not try and associate again... */
5723 if (network->last_associate &&
5724 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5725 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5726 "because of storming (%ums since last "
5727 "assoc attempt).\n",
5728 escape_essid(network->ssid, network->ssid_len),
5729 print_mac(mac, network->bssid),
5730 jiffies_to_msecs(jiffies -
5731 network->last_associate));
5732 return 0;
5735 /* Now go through and see if the requested network is valid... */
5736 if (priv->ieee->scan_age != 0 &&
5737 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5738 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5739 "because of age: %ums.\n",
5740 escape_essid(network->ssid, network->ssid_len),
5741 print_mac(mac, network->bssid),
5742 jiffies_to_msecs(jiffies -
5743 network->last_scanned));
5744 return 0;
5747 if ((priv->config & CFG_STATIC_CHANNEL) &&
5748 (network->channel != priv->channel)) {
5749 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5750 "because of channel mismatch: %d != %d.\n",
5751 escape_essid(network->ssid, network->ssid_len),
5752 print_mac(mac, network->bssid),
5753 network->channel, priv->channel);
5754 return 0;
5757 /* Verify privacy compatibility */
5758 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5759 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5760 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5761 "because of privacy mismatch: %s != %s.\n",
5762 escape_essid(network->ssid, network->ssid_len),
5763 print_mac(mac, network->bssid),
5764 priv->capability & CAP_PRIVACY_ON ? "on" :
5765 "off",
5766 network->capability &
5767 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5768 return 0;
5771 if ((priv->config & CFG_STATIC_BSSID) &&
5772 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5773 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5774 "because of BSSID mismatch: %s.\n",
5775 escape_essid(network->ssid, network->ssid_len),
5776 print_mac(mac, network->bssid), print_mac(mac, priv->bssid));
5777 return 0;
5780 /* Filter out any incompatible freq / mode combinations */
5781 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5782 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5783 "because of invalid frequency/mode "
5784 "combination.\n",
5785 escape_essid(network->ssid, network->ssid_len),
5786 print_mac(mac, network->bssid));
5787 return 0;
5790 /* Filter out invalid channel in current GEO */
5791 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5792 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5793 "because of invalid channel in current GEO\n",
5794 escape_essid(network->ssid, network->ssid_len),
5795 print_mac(mac, network->bssid));
5796 return 0;
5799 /* Ensure that the rates supported by the driver are compatible with
5800 * this AP, including verification of basic rates (mandatory) */
5801 if (!ipw_compatible_rates(priv, network, &rates)) {
5802 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5803 "because configured rate mask excludes "
5804 "AP mandatory rate.\n",
5805 escape_essid(network->ssid, network->ssid_len),
5806 print_mac(mac, network->bssid));
5807 return 0;
5810 if (rates.num_rates == 0) {
5811 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5812 "because of no compatible rates.\n",
5813 escape_essid(network->ssid, network->ssid_len),
5814 print_mac(mac, network->bssid));
5815 return 0;
5818 /* TODO: Perform any further minimal comparative tests. We do not
5819 * want to put too much policy logic here; intelligent scan selection
5820 * should occur within a generic IEEE 802.11 user space tool. */
5822 /* Set up 'new' AP to this network */
5823 ipw_copy_rates(&match->rates, &rates);
5824 match->network = network;
5826 IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n",
5827 escape_essid(network->ssid, network->ssid_len),
5828 print_mac(mac, network->bssid));
5830 return 1;
5833 static void ipw_adhoc_create(struct ipw_priv *priv,
5834 struct ieee80211_network *network)
5836 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5837 int i;
5840 * For the purposes of scanning, we can set our wireless mode
5841 * to trigger scans across combinations of bands, but when it
5842 * comes to creating a new ad-hoc network, we have to tell the FW
5843 * exactly which band to use.
5845 * We also have the possibility of an invalid channel for the
5846 * chosen band. Attempting to create a new ad-hoc network
5847 * with an invalid channel for the wireless mode will trigger a
5848 * FW fatal error.
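 *
 * For example (hypothetical situation, not a driver default): if
 * priv->channel is not valid for the current GEO at all,
 * ieee80211_is_valid_channel() returns 0 and the default branch below
 * forces both the mode and the channel back to the first channel of
 * whichever band the current wireless mode allows.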
5851 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5852 case IEEE80211_52GHZ_BAND:
5853 network->mode = IEEE_A;
5854 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5855 BUG_ON(i == -1);
5856 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5857 IPW_WARNING("Overriding invalid channel\n");
5858 priv->channel = geo->a[0].channel;
5860 break;
5862 case IEEE80211_24GHZ_BAND:
5863 if (priv->ieee->mode & IEEE_G)
5864 network->mode = IEEE_G;
5865 else
5866 network->mode = IEEE_B;
5867 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5868 BUG_ON(i == -1);
5869 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5870 IPW_WARNING("Overriding invalid channel\n");
5871 priv->channel = geo->bg[0].channel;
5873 break;
5875 default:
5876 IPW_WARNING("Overriding invalid channel\n");
5877 if (priv->ieee->mode & IEEE_A) {
5878 network->mode = IEEE_A;
5879 priv->channel = geo->a[0].channel;
5880 } else if (priv->ieee->mode & IEEE_G) {
5881 network->mode = IEEE_G;
5882 priv->channel = geo->bg[0].channel;
5883 } else {
5884 network->mode = IEEE_B;
5885 priv->channel = geo->bg[0].channel;
5887 break;
5890 network->channel = priv->channel;
5891 priv->config |= CFG_ADHOC_PERSIST;
5892 ipw_create_bssid(priv, network->bssid);
5893 network->ssid_len = priv->essid_len;
5894 memcpy(network->ssid, priv->essid, priv->essid_len);
5895 memset(&network->stats, 0, sizeof(network->stats));
5896 network->capability = WLAN_CAPABILITY_IBSS;
5897 if (!(priv->config & CFG_PREAMBLE_LONG))
5898 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5899 if (priv->capability & CAP_PRIVACY_ON)
5900 network->capability |= WLAN_CAPABILITY_PRIVACY;
5901 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5902 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5903 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5904 memcpy(network->rates_ex,
5905 &priv->rates.supported_rates[network->rates_len],
5906 network->rates_ex_len);
5907 network->last_scanned = 0;
5908 network->flags = 0;
5909 network->last_associate = 0;
5910 network->time_stamp[0] = 0;
5911 network->time_stamp[1] = 0;
5912 network->beacon_interval = 100; /* Default */
5913 network->listen_interval = 10; /* Default */
5914 network->atim_window = 0; /* Default */
5915 network->wpa_ie_len = 0;
5916 network->rsn_ie_len = 0;
5919 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5921 struct ipw_tgi_tx_key key;
5923 if (!(priv->ieee->sec.flags & (1 << index)))
5924 return;
5926 key.key_id = index;
5927 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5928 key.security_type = type;
5929 key.station_index = 0; /* always 0 for BSS */
5930 key.flags = 0;
5931 /* 0 for new key; previous value of counter (after fatal error) */
5932 key.tx_counter[0] = cpu_to_le32(0);
5933 key.tx_counter[1] = cpu_to_le32(0);
5935 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5938 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5940 struct ipw_wep_key key;
5941 int i;
5943 key.cmd_id = DINO_CMD_WEP_KEY;
5944 key.seq_num = 0;
5946 /* Note: AES keys cannot be set multiple times;
5947 * only set them the first time. */
5948 for (i = 0; i < 4; i++) {
5949 key.key_index = i | type;
5950 if (!(priv->ieee->sec.flags & (1 << i))) {
5951 key.key_size = 0;
5952 continue;
5955 key.key_size = priv->ieee->sec.key_sizes[i];
5956 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5958 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5962 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5964 if (priv->ieee->host_encrypt)
5965 return;
5967 switch (level) {
5968 case SEC_LEVEL_3:
5969 priv->sys_config.disable_unicast_decryption = 0;
5970 priv->ieee->host_decrypt = 0;
5971 break;
5972 case SEC_LEVEL_2:
5973 priv->sys_config.disable_unicast_decryption = 1;
5974 priv->ieee->host_decrypt = 1;
5975 break;
5976 case SEC_LEVEL_1:
5977 priv->sys_config.disable_unicast_decryption = 0;
5978 priv->ieee->host_decrypt = 0;
5979 break;
5980 case SEC_LEVEL_0:
5981 priv->sys_config.disable_unicast_decryption = 1;
5982 break;
5983 default:
5984 break;
5988 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5990 if (priv->ieee->host_encrypt)
5991 return;
5993 switch (level) {
5994 case SEC_LEVEL_3:
5995 priv->sys_config.disable_multicast_decryption = 0;
5996 break;
5997 case SEC_LEVEL_2:
5998 priv->sys_config.disable_multicast_decryption = 1;
5999 break;
6000 case SEC_LEVEL_1:
6001 priv->sys_config.disable_multicast_decryption = 0;
6002 break;
6003 case SEC_LEVEL_0:
6004 priv->sys_config.disable_multicast_decryption = 1;
6005 break;
6006 default:
6007 break;
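/*
 * Push the active TX keys to the firmware according to the negotiated
 * security level: level 3 (CCMP) loads the CCM TX key and, when group
 * decryption is done in hardware, the group keys as well; level 2
 * (TKIP) loads the TKIP TX key; level 1 (WEP) loads the static WEP
 * key set and enables hardware decryption for it.
 */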
6011 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6013 switch (priv->ieee->sec.level) {
6014 case SEC_LEVEL_3:
6015 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6016 ipw_send_tgi_tx_key(priv,
6017 DCT_FLAG_EXT_SECURITY_CCM,
6018 priv->ieee->sec.active_key);
6020 if (!priv->ieee->host_mc_decrypt)
6021 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6022 break;
6023 case SEC_LEVEL_2:
6024 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6025 ipw_send_tgi_tx_key(priv,
6026 DCT_FLAG_EXT_SECURITY_TKIP,
6027 priv->ieee->sec.active_key);
6028 break;
6029 case SEC_LEVEL_1:
6030 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6031 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6032 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6033 break;
6034 case SEC_LEVEL_0:
6035 default:
6036 break;
6040 static void ipw_adhoc_check(void *data)
6042 struct ipw_priv *priv = data;
6044 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6045 !(priv->config & CFG_ADHOC_PERSIST)) {
6046 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6047 IPW_DL_STATE | IPW_DL_ASSOC,
6048 "Missed beacon: %d - disassociate\n",
6049 priv->missed_adhoc_beacons);
6050 ipw_remove_current_network(priv);
6051 ipw_disassociate(priv);
6052 return;
6055 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6056 le16_to_cpu(priv->assoc_request.beacon_interval));
6059 static void ipw_bg_adhoc_check(struct work_struct *work)
6061 struct ipw_priv *priv =
6062 container_of(work, struct ipw_priv, adhoc_check.work);
6063 mutex_lock(&priv->mutex);
6064 ipw_adhoc_check(priv);
6065 mutex_unlock(&priv->mutex);
6068 static void ipw_debug_config(struct ipw_priv *priv)
6070 DECLARE_MAC_BUF(mac);
6071 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6072 "[CFG 0x%08X]\n", priv->config);
6073 if (priv->config & CFG_STATIC_CHANNEL)
6074 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6075 else
6076 IPW_DEBUG_INFO("Channel unlocked.\n");
6077 if (priv->config & CFG_STATIC_ESSID)
6078 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6079 escape_essid(priv->essid, priv->essid_len));
6080 else
6081 IPW_DEBUG_INFO("ESSID unlocked.\n");
6082 if (priv->config & CFG_STATIC_BSSID)
6083 IPW_DEBUG_INFO("BSSID locked to %s\n",
6084 print_mac(mac, priv->bssid));
6085 else
6086 IPW_DEBUG_INFO("BSSID unlocked.\n");
6087 if (priv->capability & CAP_PRIVACY_ON)
6088 IPW_DEBUG_INFO("PRIVACY on\n");
6089 else
6090 IPW_DEBUG_INFO("PRIVACY off\n");
6091 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6094 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6096 /* TODO: Verify that this works... */
6097 struct ipw_fixed_rate fr = {
6098 .tx_rates = priv->rates_mask
6100 u32 reg;
6101 u16 mask = 0;
6103 /* Identify 'current FW band' and match it with the fixed
6104 * Tx rates */
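/*
 * For the A band the OFDM portion of the mask is shifted down by
 * IEEE80211_OFDM_SHIFT_MASK_A so the firmware sees the 5.2GHz rates
 * starting at bit 0.  For mixed B/G operation the 6, 9 and 12 Mb OFDM
 * bits are each folded down one position (see the mask handling
 * below) while the remaining bits pass through unchanged.
 */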
6106 switch (priv->ieee->freq_band) {
6107 case IEEE80211_52GHZ_BAND: /* A only */
6108 /* IEEE_A */
6109 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6110 /* Invalid fixed rate mask */
6111 IPW_DEBUG_WX
6112 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6113 fr.tx_rates = 0;
6114 break;
6117 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6118 break;
6120 default: /* 2.4Ghz or Mixed */
6121 /* IEEE_B */
6122 if (mode == IEEE_B) {
6123 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6124 /* Invalid fixed rate mask */
6125 IPW_DEBUG_WX
6126 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6127 fr.tx_rates = 0;
6129 break;
6132 /* IEEE_G */
6133 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6134 IEEE80211_OFDM_RATES_MASK)) {
6135 /* Invalid fixed rate mask */
6136 IPW_DEBUG_WX
6137 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6138 fr.tx_rates = 0;
6139 break;
6142 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6143 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6144 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6147 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6148 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6149 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6152 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6153 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6154 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6157 fr.tx_rates |= mask;
6158 break;
6161 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6162 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6165 static void ipw_abort_scan(struct ipw_priv *priv)
6167 int err;
6169 if (priv->status & STATUS_SCAN_ABORTING) {
6170 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6171 return;
6173 priv->status |= STATUS_SCAN_ABORTING;
6175 err = ipw_send_scan_abort(priv);
6176 if (err)
6177 IPW_DEBUG_HC("Request to abort scan failed.\n");
6180 static void ipw_add_scan_channels(struct ipw_priv *priv,
6181 struct ipw_scan_request_ext *scan,
6182 int scan_type)
6184 int channel_index = 0;
6185 const struct ieee80211_geo *geo;
6186 int i;
6188 geo = ieee80211_get_geo(priv->ieee);
6190 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6191 int start = channel_index;
6192 for (i = 0; i < geo->a_channels; i++) {
6193 if ((priv->status & STATUS_ASSOCIATED) &&
6194 geo->a[i].channel == priv->channel)
6195 continue;
6196 channel_index++;
6197 scan->channels_list[channel_index] = geo->a[i].channel;
6198 ipw_set_scan_type(scan, channel_index,
6199 geo->a[i].
6200 flags & IEEE80211_CH_PASSIVE_ONLY ?
6201 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6202 scan_type);
6205 if (start != channel_index) {
6206 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6207 (channel_index - start);
6208 channel_index++;
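/*
 * channels_list is a sequence of per-band runs: the slot reserved at
 * 'start' encodes the band in its top two bits and the number of
 * channels that follow in the low six bits, which is why
 * channel_index is advanced before each channel is stored.
 */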
6212 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6213 int start = channel_index;
6214 if (priv->config & CFG_SPEED_SCAN) {
6215 int index;
6216 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6217 /* nop out the list */
6218 [0] = 0
6221 u8 channel;
6222 while (channel_index < IPW_SCAN_CHANNELS) {
6223 channel =
6224 priv->speed_scan[priv->speed_scan_pos];
6225 if (channel == 0) {
6226 priv->speed_scan_pos = 0;
6227 channel = priv->speed_scan[0];
6229 if ((priv->status & STATUS_ASSOCIATED) &&
6230 channel == priv->channel) {
6231 priv->speed_scan_pos++;
6232 continue;
6235 /* If this channel has already been
6236 * added in scan, break from loop
6237 * and this will be the first channel
6238 * in the next scan.
6240 if (channels[channel - 1] != 0)
6241 break;
6243 channels[channel - 1] = 1;
6244 priv->speed_scan_pos++;
6245 channel_index++;
6246 scan->channels_list[channel_index] = channel;
6247 index =
6248 ieee80211_channel_to_index(priv->ieee, channel);
6249 ipw_set_scan_type(scan, channel_index,
6250 geo->bg[index].
6251 flags &
6252 IEEE80211_CH_PASSIVE_ONLY ?
6253 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6254 : scan_type);
6256 } else {
6257 for (i = 0; i < geo->bg_channels; i++) {
6258 if ((priv->status & STATUS_ASSOCIATED) &&
6259 geo->bg[i].channel == priv->channel)
6260 continue;
6261 channel_index++;
6262 scan->channels_list[channel_index] =
6263 geo->bg[i].channel;
6264 ipw_set_scan_type(scan, channel_index,
6265 geo->bg[i].
6266 flags &
6267 IEEE80211_CH_PASSIVE_ONLY ?
6268 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6269 : scan_type);
6273 if (start != channel_index) {
6274 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6275 (channel_index - start);
6280 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6282 struct ipw_scan_request_ext scan;
6283 int err = 0, scan_type;
6285 if (!(priv->status & STATUS_INIT) ||
6286 (priv->status & STATUS_EXIT_PENDING))
6287 return 0;
6289 mutex_lock(&priv->mutex);
6291 if (direct && (priv->direct_scan_ssid_len == 0)) {
6292 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6293 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6294 goto done;
6297 if (priv->status & STATUS_SCANNING) {
6298 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6299 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6300 STATUS_SCAN_PENDING;
6301 goto done;
6304 if (!(priv->status & STATUS_SCAN_FORCED) &&
6305 priv->status & STATUS_SCAN_ABORTING) {
6306 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6307 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6308 STATUS_SCAN_PENDING;
6309 goto done;
6312 if (priv->status & STATUS_RF_KILL_MASK) {
6313 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6314 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6315 STATUS_SCAN_PENDING;
6316 goto done;
6319 memset(&scan, 0, sizeof(scan));
6320 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6322 if (type == IW_SCAN_TYPE_PASSIVE) {
6323 IPW_DEBUG_WX("use passive scanning\n");
6324 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6325 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6326 cpu_to_le16(120);
6327 ipw_add_scan_channels(priv, &scan, scan_type);
6328 goto send_request;
6331 /* Use active scan by default. */
6332 if (priv->config & CFG_SPEED_SCAN)
6333 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6334 cpu_to_le16(30);
6335 else
6336 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6337 cpu_to_le16(20);
6339 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6340 cpu_to_le16(20);
6342 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6343 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6345 #ifdef CONFIG_IPW2200_MONITOR
6346 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6347 u8 channel;
6348 u8 band = 0;
6350 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6351 case IEEE80211_52GHZ_BAND:
6352 band = (u8) (IPW_A_MODE << 6) | 1;
6353 channel = priv->channel;
6354 break;
6356 case IEEE80211_24GHZ_BAND:
6357 band = (u8) (IPW_B_MODE << 6) | 1;
6358 channel = priv->channel;
6359 break;
6361 default:
6362 band = (u8) (IPW_B_MODE << 6) | 1;
6363 channel = 9;
6364 break;
6367 scan.channels_list[0] = band;
6368 scan.channels_list[1] = channel;
6369 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
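/*
 * In monitor mode the channel list is built by hand: entry 0 carries
 * the band tag and a count of one, entry 1 is the single channel the
 * radio should sit on.
 */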
6371 /* NOTE: The card will sit on this channel for this time
6372 * period. Scan aborts are timing sensitive and frequently
6373 * result in firmware restarts. As such, it is best to
6374 * set a small dwell_time here and just keep re-issuing
6375 * scans. Otherwise fast channel hopping will not actually
6376 * hop channels.
6378 * TODO: Move SPEED SCAN support to all modes and bands */
6379 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6380 cpu_to_le16(2000);
6381 } else {
6382 #endif /* CONFIG_IPW2200_MONITOR */
6383 /* Honor direct scans first, otherwise if we are roaming make
6384 * this a direct scan for the current network. Finally,
6385 * ensure that every other scan is a fast channel hop scan */
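/*
 * The full_scan_index % 2 test below turns every other scan into a
 * directed probe for the configured ESSID; the scans in between stay
 * broadcast-only so channel hopping remains fast.
 */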
6386 if (direct) {
6387 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6388 priv->direct_scan_ssid_len);
6389 if (err) {
6390 IPW_DEBUG_HC("Attempt to send SSID command "
6391 "failed\n");
6392 goto done;
6395 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6396 } else if ((priv->status & STATUS_ROAMING)
6397 || (!(priv->status & STATUS_ASSOCIATED)
6398 && (priv->config & CFG_STATIC_ESSID)
6399 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6400 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6401 if (err) {
6402 IPW_DEBUG_HC("Attempt to send SSID command "
6403 "failed.\n");
6404 goto done;
6407 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6408 } else
6409 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6411 ipw_add_scan_channels(priv, &scan, scan_type);
6412 #ifdef CONFIG_IPW2200_MONITOR
6414 #endif
6416 send_request:
6417 err = ipw_send_scan_request_ext(priv, &scan);
6418 if (err) {
6419 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6420 goto done;
6423 priv->status |= STATUS_SCANNING;
6424 if (direct) {
6425 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6426 priv->direct_scan_ssid_len = 0;
6427 } else
6428 priv->status &= ~STATUS_SCAN_PENDING;
6430 queue_delayed_work(priv->workqueue, &priv->scan_check,
6431 IPW_SCAN_CHECK_WATCHDOG);
6432 done:
6433 mutex_unlock(&priv->mutex);
6434 return err;
6437 static void ipw_request_passive_scan(struct work_struct *work)
6439 struct ipw_priv *priv =
6440 container_of(work, struct ipw_priv, request_passive_scan.work);
6441 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6444 static void ipw_request_scan(struct work_struct *work)
6446 struct ipw_priv *priv =
6447 container_of(work, struct ipw_priv, request_scan.work);
6448 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6451 static void ipw_request_direct_scan(struct work_struct *work)
6453 struct ipw_priv *priv =
6454 container_of(work, struct ipw_priv, request_direct_scan.work);
6455 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6458 static void ipw_bg_abort_scan(struct work_struct *work)
6460 struct ipw_priv *priv =
6461 container_of(work, struct ipw_priv, abort_scan);
6462 mutex_lock(&priv->mutex);
6463 ipw_abort_scan(priv);
6464 mutex_unlock(&priv->mutex);
6467 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6469 /* This is called when wpa_supplicant loads and closes the driver
6470 * interface. */
6471 priv->ieee->wpa_enabled = value;
6472 return 0;
6475 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6477 struct ieee80211_device *ieee = priv->ieee;
6478 struct ieee80211_security sec = {
6479 .flags = SEC_AUTH_MODE,
6481 int ret = 0;
6483 if (value & IW_AUTH_ALG_SHARED_KEY) {
6484 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6485 ieee->open_wep = 0;
6486 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6487 sec.auth_mode = WLAN_AUTH_OPEN;
6488 ieee->open_wep = 1;
6489 } else if (value & IW_AUTH_ALG_LEAP) {
6490 sec.auth_mode = WLAN_AUTH_LEAP;
6491 ieee->open_wep = 1;
6492 } else
6493 return -EINVAL;
6495 if (ieee->set_security)
6496 ieee->set_security(ieee->dev, &sec);
6497 else
6498 ret = -EOPNOTSUPP;
6500 return ret;
6503 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6504 int wpa_ie_len)
6506 /* make sure WPA is enabled */
6507 ipw_wpa_enable(priv, 1);
6510 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6511 char *capabilities, int length)
6513 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6515 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6516 capabilities);
6520 * WE-18 support
6523 /* SIOCSIWGENIE */
6524 static int ipw_wx_set_genie(struct net_device *dev,
6525 struct iw_request_info *info,
6526 union iwreq_data *wrqu, char *extra)
6528 struct ipw_priv *priv = ieee80211_priv(dev);
6529 struct ieee80211_device *ieee = priv->ieee;
6530 u8 *buf;
6531 int err = 0;
6533 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6534 (wrqu->data.length && extra == NULL))
6535 return -EINVAL;
6537 if (wrqu->data.length) {
6538 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6539 if (buf == NULL) {
6540 err = -ENOMEM;
6541 goto out;
6544 memcpy(buf, extra, wrqu->data.length);
6545 kfree(ieee->wpa_ie);
6546 ieee->wpa_ie = buf;
6547 ieee->wpa_ie_len = wrqu->data.length;
6548 } else {
6549 kfree(ieee->wpa_ie);
6550 ieee->wpa_ie = NULL;
6551 ieee->wpa_ie_len = 0;
6554 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6555 out:
6556 return err;
6559 /* SIOCGIWGENIE */
6560 static int ipw_wx_get_genie(struct net_device *dev,
6561 struct iw_request_info *info,
6562 union iwreq_data *wrqu, char *extra)
6564 struct ipw_priv *priv = ieee80211_priv(dev);
6565 struct ieee80211_device *ieee = priv->ieee;
6566 int err = 0;
6568 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6569 wrqu->data.length = 0;
6570 goto out;
6573 if (wrqu->data.length < ieee->wpa_ie_len) {
6574 err = -E2BIG;
6575 goto out;
6578 wrqu->data.length = ieee->wpa_ie_len;
6579 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6581 out:
6582 return err;
6585 static int wext_cipher2level(int cipher)
6587 switch (cipher) {
6588 case IW_AUTH_CIPHER_NONE:
6589 return SEC_LEVEL_0;
6590 case IW_AUTH_CIPHER_WEP40:
6591 case IW_AUTH_CIPHER_WEP104:
6592 return SEC_LEVEL_1;
6593 case IW_AUTH_CIPHER_TKIP:
6594 return SEC_LEVEL_2;
6595 case IW_AUTH_CIPHER_CCMP:
6596 return SEC_LEVEL_3;
6597 default:
6598 return -1;
6602 /* SIOCSIWAUTH */
6603 static int ipw_wx_set_auth(struct net_device *dev,
6604 struct iw_request_info *info,
6605 union iwreq_data *wrqu, char *extra)
6607 struct ipw_priv *priv = ieee80211_priv(dev);
6608 struct ieee80211_device *ieee = priv->ieee;
6609 struct iw_param *param = &wrqu->param;
6610 struct ieee80211_crypt_data *crypt;
6611 unsigned long flags;
6612 int ret = 0;
6614 switch (param->flags & IW_AUTH_INDEX) {
6615 case IW_AUTH_WPA_VERSION:
6616 break;
6617 case IW_AUTH_CIPHER_PAIRWISE:
6618 ipw_set_hw_decrypt_unicast(priv,
6619 wext_cipher2level(param->value));
6620 break;
6621 case IW_AUTH_CIPHER_GROUP:
6622 ipw_set_hw_decrypt_multicast(priv,
6623 wext_cipher2level(param->value));
6624 break;
6625 case IW_AUTH_KEY_MGMT:
6627 * ipw2200 does not use these parameters
6629 break;
6631 case IW_AUTH_TKIP_COUNTERMEASURES:
6632 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6633 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6634 break;
6636 flags = crypt->ops->get_flags(crypt->priv);
6638 if (param->value)
6639 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6640 else
6641 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6643 crypt->ops->set_flags(flags, crypt->priv);
6645 break;
6647 case IW_AUTH_DROP_UNENCRYPTED:{
6648 /* HACK:
6650 * wpa_supplicant calls set_wpa_enabled when the driver
6651 * is loaded and unloaded, regardless of if WPA is being
6652 * used. No other calls are made which can be used to
6653 * determine if encryption will be used or not prior to
6654 * association being expected. If encryption is not being
6655 * used, drop_unencrypted is set to false, else true -- we
6656 * can use this to determine if the CAP_PRIVACY_ON bit should
6657 * be set.
6659 struct ieee80211_security sec = {
6660 .flags = SEC_ENABLED,
6661 .enabled = param->value,
6663 priv->ieee->drop_unencrypted = param->value;
6664 /* We only change SEC_LEVEL for open mode. Others
6665 * are set by ipw_wpa_set_encryption.
6667 if (!param->value) {
6668 sec.flags |= SEC_LEVEL;
6669 sec.level = SEC_LEVEL_0;
6670 } else {
6671 sec.flags |= SEC_LEVEL;
6672 sec.level = SEC_LEVEL_1;
6674 if (priv->ieee->set_security)
6675 priv->ieee->set_security(priv->ieee->dev, &sec);
6676 break;
6679 case IW_AUTH_80211_AUTH_ALG:
6680 ret = ipw_wpa_set_auth_algs(priv, param->value);
6681 break;
6683 case IW_AUTH_WPA_ENABLED:
6684 ret = ipw_wpa_enable(priv, param->value);
6685 ipw_disassociate(priv);
6686 break;
6688 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6689 ieee->ieee802_1x = param->value;
6690 break;
6692 case IW_AUTH_PRIVACY_INVOKED:
6693 ieee->privacy_invoked = param->value;
6694 break;
6696 default:
6697 return -EOPNOTSUPP;
6699 return ret;
6702 /* SIOCGIWAUTH */
6703 static int ipw_wx_get_auth(struct net_device *dev,
6704 struct iw_request_info *info,
6705 union iwreq_data *wrqu, char *extra)
6707 struct ipw_priv *priv = ieee80211_priv(dev);
6708 struct ieee80211_device *ieee = priv->ieee;
6709 struct ieee80211_crypt_data *crypt;
6710 struct iw_param *param = &wrqu->param;
6711 int ret = 0;
6713 switch (param->flags & IW_AUTH_INDEX) {
6714 case IW_AUTH_WPA_VERSION:
6715 case IW_AUTH_CIPHER_PAIRWISE:
6716 case IW_AUTH_CIPHER_GROUP:
6717 case IW_AUTH_KEY_MGMT:
6719 * wpa_supplicant will control these internally
6721 ret = -EOPNOTSUPP;
6722 break;
6724 case IW_AUTH_TKIP_COUNTERMEASURES:
6725 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6726 if (!crypt || !crypt->ops->get_flags)
6727 break;
6729 param->value = (crypt->ops->get_flags(crypt->priv) &
6730 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6732 break;
6734 case IW_AUTH_DROP_UNENCRYPTED:
6735 param->value = ieee->drop_unencrypted;
6736 break;
6738 case IW_AUTH_80211_AUTH_ALG:
6739 param->value = ieee->sec.auth_mode;
6740 break;
6742 case IW_AUTH_WPA_ENABLED:
6743 param->value = ieee->wpa_enabled;
6744 break;
6746 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6747 param->value = ieee->ieee802_1x;
6748 break;
6750 case IW_AUTH_ROAMING_CONTROL:
6751 case IW_AUTH_PRIVACY_INVOKED:
6752 param->value = ieee->privacy_invoked;
6753 break;
6755 default:
6756 return -EOPNOTSUPP;
6758 return 0;
6761 /* SIOCSIWENCODEEXT */
6762 static int ipw_wx_set_encodeext(struct net_device *dev,
6763 struct iw_request_info *info,
6764 union iwreq_data *wrqu, char *extra)
6766 struct ipw_priv *priv = ieee80211_priv(dev);
6767 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6769 if (hwcrypto) {
6770 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6771 /* IPW HW can't build TKIP MIC,
6772 host decryption still needed */
6773 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6774 priv->ieee->host_mc_decrypt = 1;
6775 else {
6776 priv->ieee->host_encrypt = 0;
6777 priv->ieee->host_encrypt_msdu = 1;
6778 priv->ieee->host_decrypt = 1;
6780 } else {
6781 priv->ieee->host_encrypt = 0;
6782 priv->ieee->host_encrypt_msdu = 0;
6783 priv->ieee->host_decrypt = 0;
6784 priv->ieee->host_mc_decrypt = 0;
6788 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6791 /* SIOCGIWENCODEEXT */
6792 static int ipw_wx_get_encodeext(struct net_device *dev,
6793 struct iw_request_info *info,
6794 union iwreq_data *wrqu, char *extra)
6796 struct ipw_priv *priv = ieee80211_priv(dev);
6797 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6800 /* SIOCSIWMLME */
6801 static int ipw_wx_set_mlme(struct net_device *dev,
6802 struct iw_request_info *info,
6803 union iwreq_data *wrqu, char *extra)
6805 struct ipw_priv *priv = ieee80211_priv(dev);
6806 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6807 __le16 reason;
6809 reason = cpu_to_le16(mlme->reason_code);
6811 switch (mlme->cmd) {
6812 case IW_MLME_DEAUTH:
6813 /* silently ignore */
6814 break;
6816 case IW_MLME_DISASSOC:
6817 ipw_disassociate(priv);
6818 break;
6820 default:
6821 return -EOPNOTSUPP;
6823 return 0;
6826 #ifdef CONFIG_IPW2200_QOS
6828 /* QoS */
6830 * get the modulation type of the current network or
6831 * the card's current mode
6833 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6835 u8 mode = 0;
6837 if (priv->status & STATUS_ASSOCIATED) {
6838 unsigned long flags;
6840 spin_lock_irqsave(&priv->ieee->lock, flags);
6841 mode = priv->assoc_network->mode;
6842 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6843 } else {
6844 mode = priv->ieee->mode;
6846 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6847 return mode;
6851 * Handle management frame beacon and probe response
6853 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6854 int active_network,
6855 struct ieee80211_network *network)
6857 u32 size = sizeof(struct ieee80211_qos_parameters);
6859 if (network->capability & WLAN_CAPABILITY_IBSS)
6860 network->qos_data.active = network->qos_data.supported;
6862 if (network->flags & NETWORK_HAS_QOS_MASK) {
6863 if (active_network &&
6864 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6865 network->qos_data.active = network->qos_data.supported;
6867 if ((network->qos_data.active == 1) && (active_network == 1) &&
6868 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6869 (network->qos_data.old_param_count !=
6870 network->qos_data.param_count)) {
6871 network->qos_data.old_param_count =
6872 network->qos_data.param_count;
6873 schedule_work(&priv->qos_activate);
6874 IPW_DEBUG_QOS("QoS parameters change call "
6875 "qos_activate\n");
6877 } else {
6878 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6879 memcpy(&network->qos_data.parameters,
6880 &def_parameters_CCK, size);
6881 else
6882 memcpy(&network->qos_data.parameters,
6883 &def_parameters_OFDM, size);
6885 if ((network->qos_data.active == 1) && (active_network == 1)) {
6886 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6887 schedule_work(&priv->qos_activate);
6890 network->qos_data.active = 0;
6891 network->qos_data.supported = 0;
6893 if ((priv->status & STATUS_ASSOCIATED) &&
6894 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6895 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6896 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6897 !(network->flags & NETWORK_EMPTY_ESSID))
6898 if ((network->ssid_len ==
6899 priv->assoc_network->ssid_len) &&
6900 !memcmp(network->ssid,
6901 priv->assoc_network->ssid,
6902 network->ssid_len)) {
6903 queue_work(priv->workqueue,
6904 &priv->merge_networks);
6908 return 0;
6912 * This function sets up the firmware to support QoS. It sends
6913 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6915 static int ipw_qos_activate(struct ipw_priv *priv,
6916 struct ieee80211_qos_data *qos_network_data)
6918 int err;
6919 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6920 struct ieee80211_qos_parameters *active_one = NULL;
6921 u32 size = sizeof(struct ieee80211_qos_parameters);
6922 u32 burst_duration;
6923 int i;
6924 u8 type;
6926 type = ipw_qos_current_mode(priv);
6928 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6929 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6930 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6931 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6933 if (qos_network_data == NULL) {
6934 if (type == IEEE_B) {
6935 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6936 active_one = &def_parameters_CCK;
6937 } else
6938 active_one = &def_parameters_OFDM;
6940 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6941 burst_duration = ipw_qos_get_burst_duration(priv);
6942 for (i = 0; i < QOS_QUEUE_NUM; i++)
6943 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6944 cpu_to_le16(burst_duration);
6945 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6946 if (type == IEEE_B) {
6947 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
6948 type);
6949 if (priv->qos_data.qos_enable == 0)
6950 active_one = &def_parameters_CCK;
6951 else
6952 active_one = priv->qos_data.def_qos_parm_CCK;
6953 } else {
6954 if (priv->qos_data.qos_enable == 0)
6955 active_one = &def_parameters_OFDM;
6956 else
6957 active_one = priv->qos_data.def_qos_parm_OFDM;
6959 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6960 } else {
6961 unsigned long flags;
6962 int active;
6964 spin_lock_irqsave(&priv->ieee->lock, flags);
6965 active_one = &(qos_network_data->parameters);
6966 qos_network_data->old_param_count =
6967 qos_network_data->param_count;
6968 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6969 active = qos_network_data->supported;
6970 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6972 if (active == 0) {
6973 burst_duration = ipw_qos_get_burst_duration(priv);
6974 for (i = 0; i < QOS_QUEUE_NUM; i++)
6975 qos_parameters[QOS_PARAM_SET_ACTIVE].
6976 tx_op_limit[i] = cpu_to_le16(burst_duration);
6980 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6981 err = ipw_send_qos_params_command(priv,
6982 (struct ieee80211_qos_parameters *)
6983 &(qos_parameters[0]));
6984 if (err)
6985 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6987 return err;
6991 * send IPW_CMD_WME_INFO to the firmware
6993 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6995 int ret = 0;
6996 struct ieee80211_qos_information_element qos_info;
6998 if (priv == NULL)
6999 return -1;
7001 qos_info.elementID = QOS_ELEMENT_ID;
7002 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
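/* the element length excludes the 2-byte ID/length header itself */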
7004 qos_info.version = QOS_VERSION_1;
7005 qos_info.ac_info = 0;
7007 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7008 qos_info.qui_type = QOS_OUI_TYPE;
7009 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7011 ret = ipw_send_qos_info_command(priv, &qos_info);
7012 if (ret != 0) {
7013 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7015 return ret;
7019 * Set the QoS parameters in the association request structure
7021 static int ipw_qos_association(struct ipw_priv *priv,
7022 struct ieee80211_network *network)
7024 int err = 0;
7025 struct ieee80211_qos_data *qos_data = NULL;
7026 struct ieee80211_qos_data ibss_data = {
7027 .supported = 1,
7028 .active = 1,
7031 switch (priv->ieee->iw_mode) {
7032 case IW_MODE_ADHOC:
7033 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7035 qos_data = &ibss_data;
7036 break;
7038 case IW_MODE_INFRA:
7039 qos_data = &network->qos_data;
7040 break;
7042 default:
7043 BUG();
7044 break;
7047 err = ipw_qos_activate(priv, qos_data);
7048 if (err) {
7049 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7050 return err;
7053 if (priv->qos_data.qos_enable && qos_data->supported) {
7054 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7055 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7056 return ipw_qos_set_info_element(priv);
7059 return 0;
7063 * Handle the beacon responses. If the QoS settings advertised by the
7064 * network differ from the settings stored for this association, adjust
7065 * the QoS settings accordingly.
7067 static int ipw_qos_association_resp(struct ipw_priv *priv,
7068 struct ieee80211_network *network)
7070 int ret = 0;
7071 unsigned long flags;
7072 u32 size = sizeof(struct ieee80211_qos_parameters);
7073 int set_qos_param = 0;
7075 if ((priv == NULL) || (network == NULL) ||
7076 (priv->assoc_network == NULL))
7077 return ret;
7079 if (!(priv->status & STATUS_ASSOCIATED))
7080 return ret;
7082 if (priv->ieee->iw_mode != IW_MODE_INFRA)
7083 return ret;
7085 spin_lock_irqsave(&priv->ieee->lock, flags);
7086 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7087 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7088 sizeof(struct ieee80211_qos_data));
7089 priv->assoc_network->qos_data.active = 1;
7090 if ((network->qos_data.old_param_count !=
7091 network->qos_data.param_count)) {
7092 set_qos_param = 1;
7093 network->qos_data.old_param_count =
7094 network->qos_data.param_count;
7097 } else {
7098 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7099 memcpy(&priv->assoc_network->qos_data.parameters,
7100 &def_parameters_CCK, size);
7101 else
7102 memcpy(&priv->assoc_network->qos_data.parameters,
7103 &def_parameters_OFDM, size);
7104 priv->assoc_network->qos_data.active = 0;
7105 priv->assoc_network->qos_data.supported = 0;
7106 set_qos_param = 1;
7109 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7111 if (set_qos_param == 1)
7112 schedule_work(&priv->qos_activate);
7114 return ret;
7117 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7119 u32 ret = 0;
7121 if (priv == NULL)
7122 return 0;
7124 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7125 ret = priv->qos_data.burst_duration_CCK;
7126 else
7127 ret = priv->qos_data.burst_duration_OFDM;
7129 return ret;
7133 * Initialize the global QoS settings
7135 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7136 int burst_enable, u32 burst_duration_CCK,
7137 u32 burst_duration_OFDM)
7139 priv->qos_data.qos_enable = enable;
7141 if (priv->qos_data.qos_enable) {
7142 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7143 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7144 IPW_DEBUG_QOS("QoS is enabled\n");
7145 } else {
7146 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7147 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7148 IPW_DEBUG_QOS("QoS is not enabled\n");
7151 priv->qos_data.burst_enable = burst_enable;
7153 if (burst_enable) {
7154 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7155 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7156 } else {
7157 priv->qos_data.burst_duration_CCK = 0;
7158 priv->qos_data.burst_duration_OFDM = 0;
7163 * map the packet priority to the right TX Queue
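 * from_priority_to_tx_queue[] holds 1-based queue numbers, hence the
 * "- 1" below; priorities above 7 and the QoS-disabled case both fall
 * back to the priority 0 mapping.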
7165 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7167 if (priority > 7 || !priv->qos_data.qos_enable)
7168 priority = 0;
7170 return from_priority_to_tx_queue[priority] - 1;
7173 static int ipw_is_qos_active(struct net_device *dev,
7174 struct sk_buff *skb)
7176 struct ipw_priv *priv = ieee80211_priv(dev);
7177 struct ieee80211_qos_data *qos_data = NULL;
7178 int active, supported;
7179 u8 *daddr = skb->data + ETH_ALEN;
7180 int unicast = !is_multicast_ether_addr(daddr);
7182 if (!(priv->status & STATUS_ASSOCIATED))
7183 return 0;
7185 qos_data = &priv->assoc_network->qos_data;
7187 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7188 if (unicast == 0)
7189 qos_data->active = 0;
7190 else
7191 qos_data->active = qos_data->supported;
7193 active = qos_data->active;
7194 supported = qos_data->supported;
7195 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7196 "unicast %d\n",
7197 priv->qos_data.qos_enable, active, supported, unicast);
7198 if (active && priv->qos_data.qos_enable)
7199 return 1;
7201 return 0;
7205 * Add the QoS parameters to the TX command
7207 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7208 u16 priority,
7209 struct tfd_data *tfd)
7211 int tx_queue_id = 0;
7214 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7215 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
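/*
 * qos_no_ack_mask is a per-TX-queue bitmap: when the bit for this
 * queue is set, the ACK requirement is dropped and the frame's QoS
 * control field is marked No Ack.
 */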
7217 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7218 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7219 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7221 return 0;
7225 * background support to run QoS activate functionality
7227 static void ipw_bg_qos_activate(struct work_struct *work)
7229 struct ipw_priv *priv =
7230 container_of(work, struct ipw_priv, qos_activate);
7232 if (priv == NULL)
7233 return;
7235 mutex_lock(&priv->mutex);
7237 if (priv->status & STATUS_ASSOCIATED)
7238 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7240 mutex_unlock(&priv->mutex);
7243 static int ipw_handle_probe_response(struct net_device *dev,
7244 struct ieee80211_probe_response *resp,
7245 struct ieee80211_network *network)
7247 struct ipw_priv *priv = ieee80211_priv(dev);
7248 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7249 (network == priv->assoc_network));
7251 ipw_qos_handle_probe_response(priv, active_network, network);
7253 return 0;
7256 static int ipw_handle_beacon(struct net_device *dev,
7257 struct ieee80211_beacon *resp,
7258 struct ieee80211_network *network)
7260 struct ipw_priv *priv = ieee80211_priv(dev);
7261 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7262 (network == priv->assoc_network));
7264 ipw_qos_handle_probe_response(priv, active_network, network);
7266 return 0;
7269 static int ipw_handle_assoc_response(struct net_device *dev,
7270 struct ieee80211_assoc_response *resp,
7271 struct ieee80211_network *network)
7273 struct ipw_priv *priv = ieee80211_priv(dev);
7274 ipw_qos_association_resp(priv, network);
7275 return 0;
7278 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7279 *qos_param)
7281 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7282 sizeof(*qos_param) * 3, qos_param);
7285 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7286 *qos_param)
7288 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7289 qos_param);
7292 #endif /* CONFIG_IPW2200_QOS */
7294 static int ipw_associate_network(struct ipw_priv *priv,
7295 struct ieee80211_network *network,
7296 struct ipw_supported_rates *rates, int roaming)
7298 int err;
7299 DECLARE_MAC_BUF(mac);
7301 if (priv->config & CFG_FIXED_RATE)
7302 ipw_set_fixed_rate(priv, network->mode);
7304 if (!(priv->config & CFG_STATIC_ESSID)) {
7305 priv->essid_len = min(network->ssid_len,
7306 (u8) IW_ESSID_MAX_SIZE);
7307 memcpy(priv->essid, network->ssid, priv->essid_len);
7310 network->last_associate = jiffies;
7312 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7313 priv->assoc_request.channel = network->channel;
7314 priv->assoc_request.auth_key = 0;
7316 if ((priv->capability & CAP_PRIVACY_ON) &&
7317 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7318 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7319 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7321 if (priv->ieee->sec.level == SEC_LEVEL_1)
7322 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7324 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7325 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7326 priv->assoc_request.auth_type = AUTH_LEAP;
7327 else
7328 priv->assoc_request.auth_type = AUTH_OPEN;
7330 if (priv->ieee->wpa_ie_len) {
7331 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7332 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7333 priv->ieee->wpa_ie_len);
7337 * It is valid for our ieee device to support multiple modes, but
7338 * when it comes to associating to a given network we have to choose
7339 * just one mode.
7341 if (network->mode & priv->ieee->mode & IEEE_A)
7342 priv->assoc_request.ieee_mode = IPW_A_MODE;
7343 else if (network->mode & priv->ieee->mode & IEEE_G)
7344 priv->assoc_request.ieee_mode = IPW_G_MODE;
7345 else if (network->mode & priv->ieee->mode & IEEE_B)
7346 priv->assoc_request.ieee_mode = IPW_B_MODE;
7348 priv->assoc_request.capability = cpu_to_le16(network->capability);
7349 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7350 && !(priv->config & CFG_PREAMBLE_LONG)) {
7351 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7352 } else {
7353 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7355 /* Clear the short preamble if we won't be supporting it */
7356 priv->assoc_request.capability &=
7357 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7360 /* Clear capability bits that aren't used in Ad Hoc */
7361 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7362 priv->assoc_request.capability &=
7363 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7365 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7366 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7367 roaming ? "Rea" : "A",
7368 escape_essid(priv->essid, priv->essid_len),
7369 network->channel,
7370 ipw_modes[priv->assoc_request.ieee_mode],
7371 rates->num_rates,
7372 (priv->assoc_request.preamble_length ==
7373 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7374 network->capability &
7375 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7376 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7377 priv->capability & CAP_PRIVACY_ON ?
7378 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7379 "(open)") : "",
7380 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7381 priv->capability & CAP_PRIVACY_ON ?
7382 '1' + priv->ieee->sec.active_key : '.',
7383 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7385 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7386 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7387 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7388 priv->assoc_request.assoc_type = HC_IBSS_START;
7389 priv->assoc_request.assoc_tsf_msw = 0;
7390 priv->assoc_request.assoc_tsf_lsw = 0;
7391 } else {
7392 if (unlikely(roaming))
7393 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7394 else
7395 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7396 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7397 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7400 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7402 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7403 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7404 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7405 } else {
7406 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7407 priv->assoc_request.atim_window = 0;
7410 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7412 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7413 if (err) {
7414 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7415 return err;
7418 rates->ieee_mode = priv->assoc_request.ieee_mode;
7419 rates->purpose = IPW_RATE_CONNECT;
7420 ipw_send_supported_rates(priv, rates);
7422 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7423 priv->sys_config.dot11g_auto_detection = 1;
7424 else
7425 priv->sys_config.dot11g_auto_detection = 0;
7427 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7428 priv->sys_config.answer_broadcast_ssid_probe = 1;
7429 else
7430 priv->sys_config.answer_broadcast_ssid_probe = 0;
7432 err = ipw_send_system_config(priv);
7433 if (err) {
7434 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7435 return err;
7438 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7439 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7440 if (err) {
7441 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7442 return err;
7446 * If preemption is enabled, it is possible for the association
7447 * to complete before we return from ipw_send_associate. Therefore
7448 * we have to be sure to update our private data first.
7450 priv->channel = network->channel;
7451 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7452 priv->status |= STATUS_ASSOCIATING;
7453 priv->status &= ~STATUS_SECURITY_UPDATED;
7455 priv->assoc_network = network;
7457 #ifdef CONFIG_IPW2200_QOS
7458 ipw_qos_association(priv, network);
7459 #endif
7461 err = ipw_send_associate(priv, &priv->assoc_request);
7462 if (err) {
7463 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7464 return err;
7467 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n",
7468 escape_essid(priv->essid, priv->essid_len),
7469 print_mac(mac, priv->bssid));
7471 return 0;
7474 static void ipw_roam(void *data)
7476 struct ipw_priv *priv = data;
7477 struct ieee80211_network *network = NULL;
7478 struct ipw_network_match match = {
7479 .network = priv->assoc_network
7482 /* The roaming process is as follows:
7484 * 1. Missed beacon threshold triggers the roaming process by
7485 * setting the status ROAM bit and requesting a scan.
7486 * 2. When the scan completes, it schedules the ROAM work
7487 * 3. The ROAM work looks at all of the known networks for one that
7488 * is a better network than the currently associated. If none
7489 * found, the ROAM process is over (ROAM bit cleared)
7490 * 4. If a better network is found, a disassociation request is
7491 * sent.
7492 * 5. When the disassociation completes, the roam work is again
7493 * scheduled. The second time through, the driver is no longer
7494 * associated, and the newly selected network is sent an
7495 * association request.
7496 * 6. At this point, the roaming process is complete and the ROAM
7497 * status bit is cleared.
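 *
 * The code below handles steps 3 and 4 on the first pass (while still
 * associated) and performs the step 5 association on the second pass,
 * after the disassociation has completed.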
7500 /* If we are no longer associated, and the roaming bit is no longer
7501 * set, then we are not actively roaming, so just return */
7502 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7503 return;
7505 if (priv->status & STATUS_ASSOCIATED) {
7506 /* First pass through ROAM process -- look for a better
7507 * network */
7508 unsigned long flags;
7509 u8 rssi = priv->assoc_network->stats.rssi;
7510 priv->assoc_network->stats.rssi = -128;
7511 spin_lock_irqsave(&priv->ieee->lock, flags);
7512 list_for_each_entry(network, &priv->ieee->network_list, list) {
7513 if (network != priv->assoc_network)
7514 ipw_best_network(priv, &match, network, 1);
7516 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7517 priv->assoc_network->stats.rssi = rssi;
7519 if (match.network == priv->assoc_network) {
7520 IPW_DEBUG_ASSOC("No better APs in this network to "
7521 "roam to.\n");
7522 priv->status &= ~STATUS_ROAMING;
7523 ipw_debug_config(priv);
7524 return;
7527 ipw_send_disassociate(priv, 1);
7528 priv->assoc_network = match.network;
7530 return;
7533 /* Second pass through ROAM process -- request association */
7534 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7535 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7536 priv->status &= ~STATUS_ROAMING;
7539 static void ipw_bg_roam(struct work_struct *work)
7541 struct ipw_priv *priv =
7542 container_of(work, struct ipw_priv, roam);
7543 mutex_lock(&priv->mutex);
7544 ipw_roam(priv);
7545 mutex_unlock(&priv->mutex);
7548 static int ipw_associate(void *data)
7550 struct ipw_priv *priv = data;
7552 struct ieee80211_network *network = NULL;
7553 struct ipw_network_match match = {
7554 .network = NULL
7556 struct ipw_supported_rates *rates;
7557 struct list_head *element;
7558 unsigned long flags;
7560 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7561 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7562 return 0;
7565 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7566 IPW_DEBUG_ASSOC("Not attempting association (already in "
7567 "progress)\n");
7568 return 0;
7571 if (priv->status & STATUS_DISASSOCIATING) {
7572 IPW_DEBUG_ASSOC("Not attempting association (in "
7573 "disassociating)\n ");
7574 queue_work(priv->workqueue, &priv->associate);
7575 return 0;
7578 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7579 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7580 "initialized)\n");
7581 return 0;
7584 if (!(priv->config & CFG_ASSOCIATE) &&
7585 !(priv->config & (CFG_STATIC_ESSID |
7586 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7587 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7588 return 0;
7591 /* Protect our use of the network_list */
7592 spin_lock_irqsave(&priv->ieee->lock, flags);
7593 list_for_each_entry(network, &priv->ieee->network_list, list)
7594 ipw_best_network(priv, &match, network, 0);
7596 network = match.network;
7597 rates = &match.rates;
7599 if (network == NULL &&
7600 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7601 priv->config & CFG_ADHOC_CREATE &&
7602 priv->config & CFG_STATIC_ESSID &&
7603 priv->config & CFG_STATIC_CHANNEL) {
7604 /* Use oldest network if the free list is empty */
7605 if (list_empty(&priv->ieee->network_free_list)) {
7606 struct ieee80211_network *oldest = NULL;
7607 struct ieee80211_network *target;
7608 DECLARE_MAC_BUF(mac);
7610 list_for_each_entry(target, &priv->ieee->network_list, list) {
7611 if ((oldest == NULL) ||
7612 (target->last_scanned < oldest->last_scanned))
7613 oldest = target;
7616 /* If there are no more slots, expire the oldest */
7617 list_del(&oldest->list);
7618 target = oldest;
7619 IPW_DEBUG_ASSOC("Expired '%s' (%s) from "
7620 "network list.\n",
7621 escape_essid(target->ssid,
7622 target->ssid_len),
7623 print_mac(mac, target->bssid));
7624 list_add_tail(&target->list,
7625 &priv->ieee->network_free_list);
7628 element = priv->ieee->network_free_list.next;
7629 network = list_entry(element, struct ieee80211_network, list);
7630 ipw_adhoc_create(priv, network);
7631 rates = &priv->rates;
7632 list_del(element);
7633 list_add_tail(&network->list, &priv->ieee->network_list);
7635 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7637 /* If we reached the end of the list, then we don't have any valid
7638 * matching APs */
7639 if (!network) {
7640 ipw_debug_config(priv);
7642 if (!(priv->status & STATUS_SCANNING)) {
7643 if (!(priv->config & CFG_SPEED_SCAN))
7644 queue_delayed_work(priv->workqueue,
7645 &priv->request_scan,
7646 SCAN_INTERVAL);
7647 else
7648 queue_delayed_work(priv->workqueue,
7649 &priv->request_scan, 0);
7652 return 0;
7655 ipw_associate_network(priv, network, rates, 0);
7657 return 1;
7660 static void ipw_bg_associate(struct work_struct *work)
7662 struct ipw_priv *priv =
7663 container_of(work, struct ipw_priv, associate);
7664 mutex_lock(&priv->mutex);
7665 ipw_associate(priv);
7666 mutex_unlock(&priv->mutex);
7669 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7670 struct sk_buff *skb)
7672 struct ieee80211_hdr *hdr;
7673 u16 fc;
7675 hdr = (struct ieee80211_hdr *)skb->data;
7676 fc = le16_to_cpu(hdr->frame_ctl);
7677 if (!(fc & IEEE80211_FCTL_PROTECTED))
7678 return;
7680 fc &= ~IEEE80211_FCTL_PROTECTED;
7681 hdr->frame_ctl = cpu_to_le16(fc);
7682 switch (priv->ieee->sec.level) {
7683 case SEC_LEVEL_3:
7684 /* Remove CCMP HDR */
7685 memmove(skb->data + IEEE80211_3ADDR_LEN,
7686 skb->data + IEEE80211_3ADDR_LEN + 8,
7687 skb->len - IEEE80211_3ADDR_LEN - 8);
7688 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7689 break;
7690 case SEC_LEVEL_2:
7691 break;
7692 case SEC_LEVEL_1:
7693 /* Remove IV */
7694 memmove(skb->data + IEEE80211_3ADDR_LEN,
7695 skb->data + IEEE80211_3ADDR_LEN + 4,
7696 skb->len - IEEE80211_3ADDR_LEN - 4);
7697 skb_trim(skb, skb->len - 8); /* IV + ICV */
7698 break;
7699 case SEC_LEVEL_0:
7700 break;
7701 default:
7702 printk(KERN_ERR "Unknown security level %d\n",
7703 priv->ieee->sec.level);
7704 break;
7708 static void ipw_handle_data_packet(struct ipw_priv *priv,
7709 struct ipw_rx_mem_buffer *rxb,
7710 struct ieee80211_rx_stats *stats)
7712 struct ieee80211_hdr_4addr *hdr;
7713 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7715 /* We received data from the HW, so stop the watchdog */
7716 priv->net_dev->trans_start = jiffies;
7718 /* We only process data packets if the
7719 * interface is open */
7720 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7721 skb_tailroom(rxb->skb))) {
7722 priv->ieee->stats.rx_errors++;
7723 priv->wstats.discard.misc++;
7724 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7725 return;
7726 } else if (unlikely(!netif_running(priv->net_dev))) {
7727 priv->ieee->stats.rx_dropped++;
7728 priv->wstats.discard.misc++;
7729 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7730 return;
7733 /* Advance skb->data to the start of the actual payload */
7734 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7736 /* Set the size of the skb to the size of the frame */
7737 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7739 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7741 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
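/*
 * If the firmware already decrypted this frame (hardware crypto in use
 * for this address type), the header still carries the PROTECTED bit
 * and the crypto header/trailer, so rebuild the skb into its plaintext
 * form before passing it up the stack.
 */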
7742 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7743 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7744 (is_multicast_ether_addr(hdr->addr1) ?
7745 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7746 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7748 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7749 priv->ieee->stats.rx_errors++;
7750 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7751 rxb->skb = NULL;
7752 __ipw_led_activity_on(priv);
7756 #ifdef CONFIG_IPW2200_RADIOTAP
7757 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7758 struct ipw_rx_mem_buffer *rxb,
7759 struct ieee80211_rx_stats *stats)
7761 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7762 struct ipw_rx_frame *frame = &pkt->u.frame;
7764 /* initial pull of some data */
7765 u16 received_channel = frame->received_channel;
7766 u8 antennaAndPhy = frame->antennaAndPhy;
7767 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7768 u16 pktrate = frame->rate;
7770 /* Magic struct that slots into the radiotap header -- no reason
7771 * to build this manually element by element, we can write it much
7772 * more efficiently than we can parse it. ORDER MATTERS HERE */
7773 struct ipw_rt_hdr *ipw_rt;
7775 short len = le16_to_cpu(pkt->u.frame.length);
7777 /* We received data from the HW, so stop the watchdog */
7778 priv->net_dev->trans_start = jiffies;
7780 /* We only process data packets if the
7781 * interface is open */
7782 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7783 skb_tailroom(rxb->skb))) {
7784 priv->ieee->stats.rx_errors++;
7785 priv->wstats.discard.misc++;
7786 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7787 return;
7788 } else if (unlikely(!netif_running(priv->net_dev))) {
7789 priv->ieee->stats.rx_dropped++;
7790 priv->wstats.discard.misc++;
7791 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7792 return;
7795 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7796 * that now */
7797 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7798 /* FIXME: Should alloc bigger skb instead */
7799 priv->ieee->stats.rx_dropped++;
7800 priv->wstats.discard.misc++;
7801 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7802 return;
7805 /* copy the frame itself */
7806 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7807 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7809 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7810 * part of our real header, saves a little time.
7812 * No longer necessary since we fill in all our data. Purge before merging
7813 * patch officially.
7814 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7815 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7818 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7820 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7821 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7822 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7824 /* Big bitfield of all the fields we provide in radiotap */
7825 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7826 (1 << IEEE80211_RADIOTAP_TSFT) |
7827 (1 << IEEE80211_RADIOTAP_FLAGS) |
7828 (1 << IEEE80211_RADIOTAP_RATE) |
7829 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7830 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7831 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7832 (1 << IEEE80211_RADIOTAP_ANTENNA));
7834 /* Zero the flags, we'll add to them as we go */
7835 ipw_rt->rt_flags = 0;
7836 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7837 frame->parent_tsf[2] << 16 |
7838 frame->parent_tsf[1] << 8 |
7839 frame->parent_tsf[0]);
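/*
 * The rx frame supplies the parent TSF as four bytes; they are
 * assembled least-significant byte first into the 64-bit radiotap
 * TSFT field.
 */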
7841 /* Convert signal to DBM */
7842 ipw_rt->rt_dbmsignal = antsignal;
7843 ipw_rt->rt_dbmnoise = frame->noise;
7845 /* Convert the channel data and set the flags */
7846 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7847 if (received_channel > 14) { /* 802.11a */
7848 ipw_rt->rt_chbitmask =
7849 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7850 } else if (antennaAndPhy & 32) { /* 802.11b */
7851 ipw_rt->rt_chbitmask =
7852 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7853 } else { /* 802.11g */
7854 ipw_rt->rt_chbitmask =
7855 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7858 /* set the rate in multiples of 500k/s */
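/* e.g. 11 Mb/s -> 22 and 54 Mb/s -> 108 (radiotap rate units are 500 kb/s) */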
7859 switch (pktrate) {
7860 case IPW_TX_RATE_1MB:
7861 ipw_rt->rt_rate = 2;
7862 break;
7863 case IPW_TX_RATE_2MB:
7864 ipw_rt->rt_rate = 4;
7865 break;
7866 case IPW_TX_RATE_5MB:
7867 ipw_rt->rt_rate = 10;
7868 break;
7869 case IPW_TX_RATE_6MB:
7870 ipw_rt->rt_rate = 12;
7871 break;
7872 case IPW_TX_RATE_9MB:
7873 ipw_rt->rt_rate = 18;
7874 break;
7875 case IPW_TX_RATE_11MB:
7876 ipw_rt->rt_rate = 22;
7877 break;
7878 case IPW_TX_RATE_12MB:
7879 ipw_rt->rt_rate = 24;
7880 break;
7881 case IPW_TX_RATE_18MB:
7882 ipw_rt->rt_rate = 36;
7883 break;
7884 case IPW_TX_RATE_24MB:
7885 ipw_rt->rt_rate = 48;
7886 break;
7887 case IPW_TX_RATE_36MB:
7888 ipw_rt->rt_rate = 72;
7889 break;
7890 case IPW_TX_RATE_48MB:
7891 ipw_rt->rt_rate = 96;
7892 break;
7893 case IPW_TX_RATE_54MB:
7894 ipw_rt->rt_rate = 108;
7895 break;
7896 default:
7897 ipw_rt->rt_rate = 0;
7898 break;
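/* Illustrative note (not part of the original driver): the radiotap rate
 * field is expressed in 500 kb/s units, so e.g.
 *
 *   1 Mb/s -> 2,  11 Mb/s -> 22,  54 Mb/s -> 108
 *
 * and 0 is used above when the firmware rate code is not recognized. */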
7901 /* antenna number */
7902 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7904 /* set the preamble flag if we have it */
7905 if ((antennaAndPhy & 64))
7906 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7908 /* Set the size of the skb to the size of the frame */
7909 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7911 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7913 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7914 priv->ieee->stats.rx_errors++;
7915 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7916 rxb->skb = NULL;
7917 /* no LED during capture */
7920 #endif
7922 #ifdef CONFIG_IPW2200_PROMISCUOUS
7923 #define ieee80211_is_probe_response(fc) \
7924 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7925 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7927 #define ieee80211_is_management(fc) \
7928 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7930 #define ieee80211_is_control(fc) \
7931 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7933 #define ieee80211_is_data(fc) \
7934 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7936 #define ieee80211_is_assoc_request(fc) \
7937 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7939 #define ieee80211_is_reassoc_request(fc) \
7940 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
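/* Illustrative note (not part of the original driver): these helpers mask the
 * 802.11 frame-control word with the standard Linux ieee80211 type and
 * subtype constants.  Assuming the usual values (FTYPE_MGMT = 0x0000,
 * STYPE_PROBE_RESP = 0x0050, STYPE_BEACON = 0x0080), a probe response has
 * fc == 0x0050 and a beacon fc == 0x0080, so both satisfy
 * ieee80211_is_management(fc). */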
7942 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7943 struct ipw_rx_mem_buffer *rxb,
7944 struct ieee80211_rx_stats *stats)
7946 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7947 struct ipw_rx_frame *frame = &pkt->u.frame;
7948 struct ipw_rt_hdr *ipw_rt;
7950 /* First cache any information we need before we overwrite
7951 * the information provided in the skb from the hardware */
7952 struct ieee80211_hdr *hdr;
7953 u16 channel = frame->received_channel;
7954 u8 phy_flags = frame->antennaAndPhy;
7955 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7956 s8 noise = frame->noise;
7957 u8 rate = frame->rate;
7958 short len = le16_to_cpu(pkt->u.frame.length);
7959 struct sk_buff *skb;
7960 int hdr_only = 0;
7961 u16 filter = priv->prom_priv->filter;
7963 /* If the filter is set to not include Rx frames then return */
7964 if (filter & IPW_PROM_NO_RX)
7965 return;
7967 /* We received data from the HW, so stop the watchdog */
7968 priv->prom_net_dev->trans_start = jiffies;
7970 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7971 priv->prom_priv->ieee->stats.rx_errors++;
7972 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7973 return;
7976 /* We only process data packets if the interface is open */
7977 if (unlikely(!netif_running(priv->prom_net_dev))) {
7978 priv->prom_priv->ieee->stats.rx_dropped++;
7979 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7980 return;
7983 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7984 * that now */
7985 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7986 /* FIXME: Should alloc bigger skb instead */
7987 priv->prom_priv->ieee->stats.rx_dropped++;
7988 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7989 return;
7992 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7993 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7994 if (filter & IPW_PROM_NO_MGMT)
7995 return;
7996 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7997 hdr_only = 1;
7998 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7999 if (filter & IPW_PROM_NO_CTL)
8000 return;
8001 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8002 hdr_only = 1;
8003 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
8004 if (filter & IPW_PROM_NO_DATA)
8005 return;
8006 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8007 hdr_only = 1;
8010 /* Copy the SKB since this is for the promiscuous side */
8011 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8012 if (skb == NULL) {
8013 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8014 return;
8017 /* copy the frame data to write after where the radiotap header goes */
8018 ipw_rt = (void *)skb->data;
8020 if (hdr_only)
8021 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
8023 memcpy(ipw_rt->payload, hdr, len);
8025 /* Zeroing the radiotap static buffer (only the bytes NOT part of our
8026 * real header, to save a little time) is no longer necessary since we
8028 * now fill in all of the data ourselves. Purge this note before
8029 * officially merging the patch.
8030 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
8031 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
8034 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8035 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8036 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8038 /* Set the size of the skb to the size of the frame */
8039 skb_put(skb, sizeof(*ipw_rt) + len);
8041 /* Big bitfield of all the fields we provide in radiotap */
8042 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8043 (1 << IEEE80211_RADIOTAP_TSFT) |
8044 (1 << IEEE80211_RADIOTAP_FLAGS) |
8045 (1 << IEEE80211_RADIOTAP_RATE) |
8046 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8047 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8048 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8049 (1 << IEEE80211_RADIOTAP_ANTENNA));
8051 /* Zero the flags, we'll add to them as we go */
8052 ipw_rt->rt_flags = 0;
8053 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8054 frame->parent_tsf[2] << 16 |
8055 frame->parent_tsf[1] << 8 |
8056 frame->parent_tsf[0]);
8058 /* Convert to dBm */
8059 ipw_rt->rt_dbmsignal = signal;
8060 ipw_rt->rt_dbmnoise = noise;
8062 /* Convert the channel data and set the flags */
8063 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8064 if (channel > 14) { /* 802.11a */
8065 ipw_rt->rt_chbitmask =
8066 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8067 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8068 ipw_rt->rt_chbitmask =
8069 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8070 } else { /* 802.11g */
8071 ipw_rt->rt_chbitmask =
8072 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8075 /* set the rate in multiples of 500 kb/s */
8076 switch (rate) {
8077 case IPW_TX_RATE_1MB:
8078 ipw_rt->rt_rate = 2;
8079 break;
8080 case IPW_TX_RATE_2MB:
8081 ipw_rt->rt_rate = 4;
8082 break;
8083 case IPW_TX_RATE_5MB:
8084 ipw_rt->rt_rate = 10;
8085 break;
8086 case IPW_TX_RATE_6MB:
8087 ipw_rt->rt_rate = 12;
8088 break;
8089 case IPW_TX_RATE_9MB:
8090 ipw_rt->rt_rate = 18;
8091 break;
8092 case IPW_TX_RATE_11MB:
8093 ipw_rt->rt_rate = 22;
8094 break;
8095 case IPW_TX_RATE_12MB:
8096 ipw_rt->rt_rate = 24;
8097 break;
8098 case IPW_TX_RATE_18MB:
8099 ipw_rt->rt_rate = 36;
8100 break;
8101 case IPW_TX_RATE_24MB:
8102 ipw_rt->rt_rate = 48;
8103 break;
8104 case IPW_TX_RATE_36MB:
8105 ipw_rt->rt_rate = 72;
8106 break;
8107 case IPW_TX_RATE_48MB:
8108 ipw_rt->rt_rate = 96;
8109 break;
8110 case IPW_TX_RATE_54MB:
8111 ipw_rt->rt_rate = 108;
8112 break;
8113 default:
8114 ipw_rt->rt_rate = 0;
8115 break;
8118 /* antenna number */
8119 ipw_rt->rt_antenna = (phy_flags & 3);
8121 /* set the preamble flag if we have it */
8122 if (phy_flags & (1 << 6))
8123 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8125 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8127 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8128 priv->prom_priv->ieee->stats.rx_errors++;
8129 dev_kfree_skb_any(skb);
8132 #endif
8134 static int is_network_packet(struct ipw_priv *priv,
8135 struct ieee80211_hdr_4addr *header)
8137 /* Filter incoming packets to determine if they are targeted toward
8138 * this network, discarding packets coming from ourselves */
8139 switch (priv->ieee->iw_mode) {
8140 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8141 /* packets from our adapter are dropped (echo) */
8142 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8143 return 0;
8145 /* {broad,multi}cast packets to our BSSID go through */
8146 if (is_multicast_ether_addr(header->addr1))
8147 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8149 /* packets to our adapter go through */
8150 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8151 ETH_ALEN);
8153 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8154 /* packets from our adapter are dropped (echo) */
8155 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8156 return 0;
8158 /* {broad,multi}cast packets to our BSS go through */
8159 if (is_multicast_ether_addr(header->addr1))
8160 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8162 /* packets to our adapter go through */
8163 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8164 ETH_ALEN);
8167 return 1;
8170 #define IPW_PACKET_RETRY_TIME HZ
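/* Illustrative note (not part of the original driver): the helper below
 * treats a frame as a duplicate when it carries the same sequence number as
 * the previous frame from that source within IPW_PACKET_RETRY_TIME (HZ
 * jiffies, i.e. roughly one second) and its fragment number is either the
 * same or out of order.  For example, if seq=100/frag=0 arrives twice within
 * a second the second copy is dropped, while seq=100/frag=1 following frag=0
 * is accepted as the next fragment. */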
8172 static int is_duplicate_packet(struct ipw_priv *priv,
8173 struct ieee80211_hdr_4addr *header)
8175 u16 sc = le16_to_cpu(header->seq_ctl);
8176 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8177 u16 frag = WLAN_GET_SEQ_FRAG(sc);
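/* Illustrative note (not part of the original driver): assuming the standard
 * 802.11 sequence-control layout (bits 0-3 fragment number, bits 4-15
 * sequence number), the macros above behave like
 *
 *   seq  = (sc >> 4) & 0x0fff;
 *   frag =  sc       & 0x000f;
 *
 * so sc == 0x1234 yields seq == 0x123 and frag == 4. */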
8178 u16 *last_seq, *last_frag;
8179 unsigned long *last_time;
8181 switch (priv->ieee->iw_mode) {
8182 case IW_MODE_ADHOC:
8184 struct list_head *p;
8185 struct ipw_ibss_seq *entry = NULL;
8186 u8 *mac = header->addr2;
8187 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
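/* Illustrative note (not part of the original driver): the hash key is just
 * the last octet of the sender's MAC address; e.g. with a hypothetical hash
 * size of 32, a MAC ending in 0x67 would land in bucket 103 % 32 == 7. */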
8189 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8190 entry =
8191 list_entry(p, struct ipw_ibss_seq, list);
8192 if (!memcmp(entry->mac, mac, ETH_ALEN))
8193 break;
8195 if (p == &priv->ibss_mac_hash[index]) {
8196 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8197 if (!entry) {
8198 IPW_ERROR
8199 ("Cannot malloc new mac entry\n");
8200 return 0;
8202 memcpy(entry->mac, mac, ETH_ALEN);
8203 entry->seq_num = seq;
8204 entry->frag_num = frag;
8205 entry->packet_time = jiffies;
8206 list_add(&entry->list,
8207 &priv->ibss_mac_hash[index]);
8208 return 0;
8210 last_seq = &entry->seq_num;
8211 last_frag = &entry->frag_num;
8212 last_time = &entry->packet_time;
8213 break;
8215 case IW_MODE_INFRA:
8216 last_seq = &priv->last_seq_num;
8217 last_frag = &priv->last_frag_num;
8218 last_time = &priv->last_packet_time;
8219 break;
8220 default:
8221 return 0;
8223 if ((*last_seq == seq) &&
8224 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8225 if (*last_frag == frag)
8226 goto drop;
8227 if (*last_frag + 1 != frag)
8228 /* out-of-order fragment */
8229 goto drop;
8230 } else
8231 *last_seq = seq;
8233 *last_frag = frag;
8234 *last_time = jiffies;
8235 return 0;
8237 drop:
8238 /* This BUG_ON is commented out because the card has been observed to
8239 * receive duplicate packets without the FCTL_RETRY bit set while in
8240 * IBSS mode with fragmentation enabled.
8241 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8242 return 1;
8245 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8246 struct ipw_rx_mem_buffer *rxb,
8247 struct ieee80211_rx_stats *stats)
8249 struct sk_buff *skb = rxb->skb;
8250 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8251 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8252 (skb->data + IPW_RX_FRAME_SIZE);
8254 ieee80211_rx_mgt(priv->ieee, header, stats);
8256 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8257 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8258 IEEE80211_STYPE_PROBE_RESP) ||
8259 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8260 IEEE80211_STYPE_BEACON))) {
8261 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8262 ipw_add_station(priv, header->addr2);
8265 if (priv->config & CFG_NET_STATS) {
8266 IPW_DEBUG_HC("sending stat packet\n");
8268 /* Set the size of the skb to the size of the full
8269 * ipw header and 802.11 frame */
8270 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8271 IPW_RX_FRAME_SIZE);
8273 /* Advance past the ipw packet header to the 802.11 frame */
8274 skb_pull(skb, IPW_RX_FRAME_SIZE);
8276 /* Push the ieee80211_rx_stats before the 802.11 frame */
8277 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8279 skb->dev = priv->ieee->dev;
8281 /* Point raw at the ieee80211_stats */
8282 skb_reset_mac_header(skb);
8284 skb->pkt_type = PACKET_OTHERHOST;
8285 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8286 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8287 netif_rx(skb);
8288 rxb->skb = NULL;
8293 * Main entry function for receiving a packet with 802.11 headers. This
8294 * should be called whenever the FW has notified us that there is a new
8295 * skb in the receive queue.
8297 static void ipw_rx(struct ipw_priv *priv)
8299 struct ipw_rx_mem_buffer *rxb;
8300 struct ipw_rx_packet *pkt;
8301 struct ieee80211_hdr_4addr *header;
8302 u32 r, w, i;
8303 u8 network_packet;
8304 u8 fill_rx = 0;
8305 DECLARE_MAC_BUF(mac);
8306 DECLARE_MAC_BUF(mac2);
8307 DECLARE_MAC_BUF(mac3);
8309 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8310 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8311 i = priv->rxq->read;
8313 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8314 fill_rx = 1;
8316 while (i != r) {
8317 rxb = priv->rxq->queue[i];
8318 if (unlikely(rxb == NULL)) {
8319 printk(KERN_CRIT "Queue not allocated!\n");
8320 break;
8322 priv->rxq->queue[i] = NULL;
8324 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8325 IPW_RX_BUF_SIZE,
8326 PCI_DMA_FROMDEVICE);
8328 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8329 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8330 pkt->header.message_type,
8331 pkt->header.rx_seq_num, pkt->header.control_bits);
8333 switch (pkt->header.message_type) {
8334 case RX_FRAME_TYPE: /* 802.11 frame */ {
8335 struct ieee80211_rx_stats stats = {
8336 .rssi = pkt->u.frame.rssi_dbm -
8337 IPW_RSSI_TO_DBM,
8338 .signal =
8339 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8340 IPW_RSSI_TO_DBM + 0x100,
8341 .noise =
8342 le16_to_cpu(pkt->u.frame.noise),
8343 .rate = pkt->u.frame.rate,
8344 .mac_time = jiffies,
8345 .received_channel =
8346 pkt->u.frame.received_channel,
8347 .freq =
8348 (pkt->u.frame.
8349 control & (1 << 0)) ?
8350 IEEE80211_24GHZ_BAND :
8351 IEEE80211_52GHZ_BAND,
8352 .len = le16_to_cpu(pkt->u.frame.length),
8355 if (stats.rssi != 0)
8356 stats.mask |= IEEE80211_STATMASK_RSSI;
8357 if (stats.signal != 0)
8358 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8359 if (stats.noise != 0)
8360 stats.mask |= IEEE80211_STATMASK_NOISE;
8361 if (stats.rate != 0)
8362 stats.mask |= IEEE80211_STATMASK_RATE;
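/* Illustrative note (not part of the original driver): each STATMASK bit
 * tells the ieee80211 stack which of the fields filled in above actually
 * carries valid data; a value of zero is treated as "not reported".  The raw
 * rssi_dbm from the firmware is offset by IPW_RSSI_TO_DBM -- assuming that
 * constant is 112 (an assumption, check ipw2200.h), a raw reading of 60
 * would correspond to roughly -52 dBm. */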
8364 priv->rx_packets++;
8366 #ifdef CONFIG_IPW2200_PROMISCUOUS
8367 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8368 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8369 #endif
8371 #ifdef CONFIG_IPW2200_MONITOR
8372 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8373 #ifdef CONFIG_IPW2200_RADIOTAP
8375 ipw_handle_data_packet_monitor(priv,
8376 rxb,
8377 &stats);
8378 #else
8379 ipw_handle_data_packet(priv, rxb,
8380 &stats);
8381 #endif
8382 break;
8384 #endif
8386 header =
8387 (struct ieee80211_hdr_4addr *)(rxb->skb->
8388 data +
8389 IPW_RX_FRAME_SIZE);
8390 /* TODO: Check Ad-Hoc dest/source and make sure
8391 * that we are actually parsing these packets
8392 * correctly -- we should probably use the
8393 * frame control of the packet and disregard
8394 * the current iw_mode */
8396 network_packet =
8397 is_network_packet(priv, header);
8398 if (network_packet && priv->assoc_network) {
8399 priv->assoc_network->stats.rssi =
8400 stats.rssi;
8401 priv->exp_avg_rssi =
8402 exponential_average(priv->exp_avg_rssi,
8403 stats.rssi, DEPTH_RSSI);
8406 IPW_DEBUG_RX("Frame: len=%u\n",
8407 le16_to_cpu(pkt->u.frame.length));
8409 if (le16_to_cpu(pkt->u.frame.length) <
8410 ieee80211_get_hdrlen(le16_to_cpu(
8411 header->frame_ctl))) {
8412 IPW_DEBUG_DROP
8413 ("Received packet is too small. "
8414 "Dropping.\n");
8415 priv->ieee->stats.rx_errors++;
8416 priv->wstats.discard.misc++;
8417 break;
8420 switch (WLAN_FC_GET_TYPE
8421 (le16_to_cpu(header->frame_ctl))) {
8423 case IEEE80211_FTYPE_MGMT:
8424 ipw_handle_mgmt_packet(priv, rxb,
8425 &stats);
8426 break;
8428 case IEEE80211_FTYPE_CTL:
8429 break;
8431 case IEEE80211_FTYPE_DATA:
8432 if (unlikely(!network_packet ||
8433 is_duplicate_packet(priv,
8434 header)))
8436 IPW_DEBUG_DROP("Dropping: "
8437 "%s, "
8438 "%s, "
8439 "%s\n",
8440 print_mac(mac,
8441 header->
8442 addr1),
8443 print_mac(mac2,
8444 header->
8445 addr2),
8446 print_mac(mac3,
8447 header->
8448 addr3));
8449 break;
8452 ipw_handle_data_packet(priv, rxb,
8453 &stats);
8455 break;
8457 break;
8460 case RX_HOST_NOTIFICATION_TYPE:{
8461 IPW_DEBUG_RX
8462 ("Notification: subtype=%02X flags=%02X size=%d\n",
8463 pkt->u.notification.subtype,
8464 pkt->u.notification.flags,
8465 le16_to_cpu(pkt->u.notification.size));
8466 ipw_rx_notification(priv, &pkt->u.notification);
8467 break;
8470 default:
8471 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8472 pkt->header.message_type);
8473 break;
8476 /* For now we just don't re-use anything. We can tweak this
8477 * later to try and re-use notification packets and SKBs that
8478 * fail to Rx correctly */
8479 if (rxb->skb != NULL) {
8480 dev_kfree_skb_any(rxb->skb);
8481 rxb->skb = NULL;
8484 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8485 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8486 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8488 i = (i + 1) % RX_QUEUE_SIZE;
8490 /* If there are a lot of unused frames, restock the Rx queue
8491 * so the ucode won't assert */
8492 if (fill_rx) {
8493 priv->rxq->read = i;
8494 ipw_rx_queue_replenish(priv);
8498 /* Backtrack one entry */
8499 priv->rxq->read = i;
8500 ipw_rx_queue_restock(priv);
8503 #define DEFAULT_RTS_THRESHOLD 2304U
8504 #define MIN_RTS_THRESHOLD 1U
8505 #define MAX_RTS_THRESHOLD 2304U
8506 #define DEFAULT_BEACON_INTERVAL 100U
8507 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8508 #define DEFAULT_LONG_RETRY_LIMIT 4U
8511 * ipw_sw_reset
8512 * @option: options to control different reset behaviour
8513 * 0 = reset everything except the 'disable' module_param
8514 * 1 = reset everything and print out driver info (for probe only)
8515 * 2 = reset everything
8517 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8519 int band, modulation;
8520 int old_mode = priv->ieee->iw_mode;
8522 /* Initialize module parameter values here */
8523 priv->config = 0;
8525 /* We default to disabling the LED code as right now it causes
8526 * too many systems to lock up... */
8527 if (!led)
8528 priv->config |= CFG_NO_LED;
8530 if (associate)
8531 priv->config |= CFG_ASSOCIATE;
8532 else
8533 IPW_DEBUG_INFO("Auto associate disabled.\n");
8535 if (auto_create)
8536 priv->config |= CFG_ADHOC_CREATE;
8537 else
8538 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8540 priv->config &= ~CFG_STATIC_ESSID;
8541 priv->essid_len = 0;
8542 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8544 if (disable && option) {
8545 priv->status |= STATUS_RF_KILL_SW;
8546 IPW_DEBUG_INFO("Radio disabled.\n");
8549 if (channel != 0) {
8550 priv->config |= CFG_STATIC_CHANNEL;
8551 priv->channel = channel;
8552 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8553 /* TODO: Validate that provided channel is in range */
8555 #ifdef CONFIG_IPW2200_QOS
8556 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8557 burst_duration_CCK, burst_duration_OFDM);
8558 #endif /* CONFIG_IPW2200_QOS */
8560 switch (mode) {
8561 case 1:
8562 priv->ieee->iw_mode = IW_MODE_ADHOC;
8563 priv->net_dev->type = ARPHRD_ETHER;
8565 break;
8566 #ifdef CONFIG_IPW2200_MONITOR
8567 case 2:
8568 priv->ieee->iw_mode = IW_MODE_MONITOR;
8569 #ifdef CONFIG_IPW2200_RADIOTAP
8570 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8571 #else
8572 priv->net_dev->type = ARPHRD_IEEE80211;
8573 #endif
8574 break;
8575 #endif
8576 default:
8577 case 0:
8578 priv->net_dev->type = ARPHRD_ETHER;
8579 priv->ieee->iw_mode = IW_MODE_INFRA;
8580 break;
8583 if (hwcrypto) {
8584 priv->ieee->host_encrypt = 0;
8585 priv->ieee->host_encrypt_msdu = 0;
8586 priv->ieee->host_decrypt = 0;
8587 priv->ieee->host_mc_decrypt = 0;
8589 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8591 /* IPW2200/2915 is able to do hardware fragmentation. */
8592 priv->ieee->host_open_frag = 0;
8594 if ((priv->pci_dev->device == 0x4223) ||
8595 (priv->pci_dev->device == 0x4224)) {
8596 if (option == 1)
8597 printk(KERN_INFO DRV_NAME
8598 ": Detected Intel PRO/Wireless 2915ABG Network "
8599 "Connection\n");
8600 priv->ieee->abg_true = 1;
8601 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8602 modulation = IEEE80211_OFDM_MODULATION |
8603 IEEE80211_CCK_MODULATION;
8604 priv->adapter = IPW_2915ABG;
8605 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8606 } else {
8607 if (option == 1)
8608 printk(KERN_INFO DRV_NAME
8609 ": Detected Intel PRO/Wireless 2200BG Network "
8610 "Connection\n");
8612 priv->ieee->abg_true = 0;
8613 band = IEEE80211_24GHZ_BAND;
8614 modulation = IEEE80211_OFDM_MODULATION |
8615 IEEE80211_CCK_MODULATION;
8616 priv->adapter = IPW_2200BG;
8617 priv->ieee->mode = IEEE_G | IEEE_B;
8620 priv->ieee->freq_band = band;
8621 priv->ieee->modulation = modulation;
8623 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8625 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8626 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8628 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8629 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8630 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8632 /* If power management is turned on, default to AC mode */
8633 priv->power_mode = IPW_POWER_AC;
8634 priv->tx_power = IPW_TX_POWER_DEFAULT;
8636 return old_mode == priv->ieee->iw_mode;
8640 * This file defines the Wireless Extension handlers. It does not
8641 * define any methods of hardware manipulation and relies on the
8642 * functions defined in ipw_main to provide the HW interaction.
8644 * The exception to this is ipw_get_ordinal(), which is used to poll
8645 * the hardware rather than make unnecessary calls.
8649 static int ipw_wx_get_name(struct net_device *dev,
8650 struct iw_request_info *info,
8651 union iwreq_data *wrqu, char *extra)
8653 struct ipw_priv *priv = ieee80211_priv(dev);
8654 mutex_lock(&priv->mutex);
8655 if (priv->status & STATUS_RF_KILL_MASK)
8656 strcpy(wrqu->name, "radio off");
8657 else if (!(priv->status & STATUS_ASSOCIATED))
8658 strcpy(wrqu->name, "unassociated");
8659 else
8660 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8661 ipw_modes[priv->assoc_request.ieee_mode]);
8662 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8663 mutex_unlock(&priv->mutex);
8664 return 0;
8667 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8669 if (channel == 0) {
8670 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8671 priv->config &= ~CFG_STATIC_CHANNEL;
8672 IPW_DEBUG_ASSOC("Attempting to associate with new "
8673 "parameters.\n");
8674 ipw_associate(priv);
8675 return 0;
8678 priv->config |= CFG_STATIC_CHANNEL;
8680 if (priv->channel == channel) {
8681 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8682 channel);
8683 return 0;
8686 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8687 priv->channel = channel;
8689 #ifdef CONFIG_IPW2200_MONITOR
8690 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8691 int i;
8692 if (priv->status & STATUS_SCANNING) {
8693 IPW_DEBUG_SCAN("Scan abort triggered due to "
8694 "channel change.\n");
8695 ipw_abort_scan(priv);
8698 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8699 udelay(10);
8701 if (priv->status & STATUS_SCANNING)
8702 IPW_DEBUG_SCAN("Still scanning...\n");
8703 else
8704 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8705 1000 - i);
8707 return 0;
8709 #endif /* CONFIG_IPW2200_MONITOR */
8711 /* Network configuration changed -- force [re]association */
8712 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8713 if (!ipw_disassociate(priv))
8714 ipw_associate(priv);
8716 return 0;
8719 static int ipw_wx_set_freq(struct net_device *dev,
8720 struct iw_request_info *info,
8721 union iwreq_data *wrqu, char *extra)
8723 struct ipw_priv *priv = ieee80211_priv(dev);
8724 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8725 struct iw_freq *fwrq = &wrqu->freq;
8726 int ret = 0, i;
8727 u8 channel, flags;
8728 int band;
8730 if (fwrq->m == 0) {
8731 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8732 mutex_lock(&priv->mutex);
8733 ret = ipw_set_channel(priv, 0);
8734 mutex_unlock(&priv->mutex);
8735 return ret;
8737 /* if setting by freq convert to channel */
8738 if (fwrq->e == 1) {
8739 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8740 if (channel == 0)
8741 return -EINVAL;
8742 } else
8743 channel = fwrq->m;
8745 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8746 return -EINVAL;
8748 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8749 i = ieee80211_channel_to_index(priv->ieee, channel);
8750 if (i == -1)
8751 return -EINVAL;
8753 flags = (band == IEEE80211_24GHZ_BAND) ?
8754 geo->bg[i].flags : geo->a[i].flags;
8755 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8756 IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive scan only)\n");
8757 return -EINVAL;
8761 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8762 mutex_lock(&priv->mutex);
8763 ret = ipw_set_channel(priv, channel);
8764 mutex_unlock(&priv->mutex);
8765 return ret;
8768 static int ipw_wx_get_freq(struct net_device *dev,
8769 struct iw_request_info *info,
8770 union iwreq_data *wrqu, char *extra)
8772 struct ipw_priv *priv = ieee80211_priv(dev);
8774 wrqu->freq.e = 0;
8776 /* If we are associated, trying to associate, or have a statically
8777 * configured CHANNEL then return that; otherwise return ANY */
8778 mutex_lock(&priv->mutex);
8779 if (priv->config & CFG_STATIC_CHANNEL ||
8780 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8781 int i;
8783 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8784 BUG_ON(i == -1);
8785 wrqu->freq.e = 1;
8787 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8788 case IEEE80211_52GHZ_BAND:
8789 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8790 break;
8792 case IEEE80211_24GHZ_BAND:
8793 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8794 break;
8796 default:
8797 BUG();
8799 } else
8800 wrqu->freq.m = 0;
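/* Illustrative note (not part of the original driver): Wireless Extensions
 * encode a frequency as m * 10^e Hz, so reporting e = 1 and
 * m = freq_in_MHz * 100000 above gives, e.g. for channel 1 at 2412 MHz,
 * m = 241200000 and e = 1, i.e. 2.412 GHz; m = 0 is read back as "any". */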
8802 mutex_unlock(&priv->mutex);
8803 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8804 return 0;
8807 static int ipw_wx_set_mode(struct net_device *dev,
8808 struct iw_request_info *info,
8809 union iwreq_data *wrqu, char *extra)
8811 struct ipw_priv *priv = ieee80211_priv(dev);
8812 int err = 0;
8814 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8816 switch (wrqu->mode) {
8817 #ifdef CONFIG_IPW2200_MONITOR
8818 case IW_MODE_MONITOR:
8819 #endif
8820 case IW_MODE_ADHOC:
8821 case IW_MODE_INFRA:
8822 break;
8823 case IW_MODE_AUTO:
8824 wrqu->mode = IW_MODE_INFRA;
8825 break;
8826 default:
8827 return -EINVAL;
8829 if (wrqu->mode == priv->ieee->iw_mode)
8830 return 0;
8832 mutex_lock(&priv->mutex);
8834 ipw_sw_reset(priv, 0);
8836 #ifdef CONFIG_IPW2200_MONITOR
8837 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8838 priv->net_dev->type = ARPHRD_ETHER;
8840 if (wrqu->mode == IW_MODE_MONITOR)
8841 #ifdef CONFIG_IPW2200_RADIOTAP
8842 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8843 #else
8844 priv->net_dev->type = ARPHRD_IEEE80211;
8845 #endif
8846 #endif /* CONFIG_IPW2200_MONITOR */
8848 /* Free the existing firmware and reset the fw_loaded
8849 * flag so ipw_load() will bring in the new firmware */
8850 free_firmware();
8852 priv->ieee->iw_mode = wrqu->mode;
8854 queue_work(priv->workqueue, &priv->adapter_restart);
8855 mutex_unlock(&priv->mutex);
8856 return err;
8859 static int ipw_wx_get_mode(struct net_device *dev,
8860 struct iw_request_info *info,
8861 union iwreq_data *wrqu, char *extra)
8863 struct ipw_priv *priv = ieee80211_priv(dev);
8864 mutex_lock(&priv->mutex);
8865 wrqu->mode = priv->ieee->iw_mode;
8866 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8867 mutex_unlock(&priv->mutex);
8868 return 0;
8871 /* Values are in microseconds */
8872 static const s32 timeout_duration[] = {
8873 350000,
8874 250000,
8875 75000,
8876 37000,
8877 25000,
8880 static const s32 period_duration[] = {
8881 400000,
8882 700000,
8883 1000000,
8884 1000000,
8885 1000000
8888 static int ipw_wx_get_range(struct net_device *dev,
8889 struct iw_request_info *info,
8890 union iwreq_data *wrqu, char *extra)
8892 struct ipw_priv *priv = ieee80211_priv(dev);
8893 struct iw_range *range = (struct iw_range *)extra;
8894 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8895 int i = 0, j;
8897 wrqu->data.length = sizeof(*range);
8898 memset(range, 0, sizeof(*range));
8900 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8901 range->throughput = 27 * 1000 * 1000;
8903 range->max_qual.qual = 100;
8904 /* TODO: Find real max RSSI and stick here */
8905 range->max_qual.level = 0;
8906 range->max_qual.noise = 0;
8907 range->max_qual.updated = 7; /* Updated all three */
8909 range->avg_qual.qual = 70;
8910 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8911 range->avg_qual.level = 0; /* FIXME: set to real average level */
8912 range->avg_qual.noise = 0;
8913 range->avg_qual.updated = 7; /* Updated all three */
8914 mutex_lock(&priv->mutex);
8915 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8917 for (i = 0; i < range->num_bitrates; i++)
8918 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8919 500000;
8921 range->max_rts = DEFAULT_RTS_THRESHOLD;
8922 range->min_frag = MIN_FRAG_THRESHOLD;
8923 range->max_frag = MAX_FRAG_THRESHOLD;
8925 range->encoding_size[0] = 5;
8926 range->encoding_size[1] = 13;
8927 range->num_encoding_sizes = 2;
8928 range->max_encoding_tokens = WEP_KEYS;
8930 /* Set the Wireless Extension versions */
8931 range->we_version_compiled = WIRELESS_EXT;
8932 range->we_version_source = 18;
8934 i = 0;
8935 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8936 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8937 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8938 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8939 continue;
8941 range->freq[i].i = geo->bg[j].channel;
8942 range->freq[i].m = geo->bg[j].freq * 100000;
8943 range->freq[i].e = 1;
8944 i++;
8948 if (priv->ieee->mode & IEEE_A) {
8949 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8950 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8951 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8952 continue;
8954 range->freq[i].i = geo->a[j].channel;
8955 range->freq[i].m = geo->a[j].freq * 100000;
8956 range->freq[i].e = 1;
8957 i++;
8961 range->num_channels = i;
8962 range->num_frequency = i;
8964 mutex_unlock(&priv->mutex);
8966 /* Event capability (kernel + driver) */
8967 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8968 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8969 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8970 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8971 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8973 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8974 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8976 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8978 IPW_DEBUG_WX("GET Range\n");
8979 return 0;
8982 static int ipw_wx_set_wap(struct net_device *dev,
8983 struct iw_request_info *info,
8984 union iwreq_data *wrqu, char *extra)
8986 struct ipw_priv *priv = ieee80211_priv(dev);
8987 DECLARE_MAC_BUF(mac);
8989 static const unsigned char any[] = {
8990 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8992 static const unsigned char off[] = {
8993 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8996 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8997 return -EINVAL;
8998 mutex_lock(&priv->mutex);
8999 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9000 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9001 /* we disable mandatory BSSID association */
9002 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9003 priv->config &= ~CFG_STATIC_BSSID;
9004 IPW_DEBUG_ASSOC("Attempting to associate with new "
9005 "parameters.\n");
9006 ipw_associate(priv);
9007 mutex_unlock(&priv->mutex);
9008 return 0;
9011 priv->config |= CFG_STATIC_BSSID;
9012 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9013 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9014 mutex_unlock(&priv->mutex);
9015 return 0;
9018 IPW_DEBUG_WX("Setting mandatory BSSID to %s\n",
9019 print_mac(mac, wrqu->ap_addr.sa_data));
9021 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9023 /* Network configuration changed -- force [re]association */
9024 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9025 if (!ipw_disassociate(priv))
9026 ipw_associate(priv);
9028 mutex_unlock(&priv->mutex);
9029 return 0;
9032 static int ipw_wx_get_wap(struct net_device *dev,
9033 struct iw_request_info *info,
9034 union iwreq_data *wrqu, char *extra)
9036 struct ipw_priv *priv = ieee80211_priv(dev);
9037 DECLARE_MAC_BUF(mac);
9039 /* If we are associated, trying to associate, or have a statically
9040 * configured BSSID then return that; otherwise return ANY */
9041 mutex_lock(&priv->mutex);
9042 if (priv->config & CFG_STATIC_BSSID ||
9043 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9044 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9045 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9046 } else
9047 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9049 IPW_DEBUG_WX("Getting WAP BSSID: %s\n",
9050 print_mac(mac, wrqu->ap_addr.sa_data));
9051 mutex_unlock(&priv->mutex);
9052 return 0;
9055 static int ipw_wx_set_essid(struct net_device *dev,
9056 struct iw_request_info *info,
9057 union iwreq_data *wrqu, char *extra)
9059 struct ipw_priv *priv = ieee80211_priv(dev);
9060 int length;
9062 mutex_lock(&priv->mutex);
9064 if (!wrqu->essid.flags)
9066 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9067 ipw_disassociate(priv);
9068 priv->config &= ~CFG_STATIC_ESSID;
9069 ipw_associate(priv);
9070 mutex_unlock(&priv->mutex);
9071 return 0;
9074 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9076 priv->config |= CFG_STATIC_ESSID;
9078 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9079 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9080 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9081 mutex_unlock(&priv->mutex);
9082 return 0;
9085 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
9086 length);
9088 priv->essid_len = length;
9089 memcpy(priv->essid, extra, priv->essid_len);
9091 /* Network configuration changed -- force [re]association */
9092 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9093 if (!ipw_disassociate(priv))
9094 ipw_associate(priv);
9096 mutex_unlock(&priv->mutex);
9097 return 0;
9100 static int ipw_wx_get_essid(struct net_device *dev,
9101 struct iw_request_info *info,
9102 union iwreq_data *wrqu, char *extra)
9104 struct ipw_priv *priv = ieee80211_priv(dev);
9106 /* If we are associated, trying to associate, or have a statically
9107 * configured ESSID then return that; otherwise return ANY */
9108 mutex_lock(&priv->mutex);
9109 if (priv->config & CFG_STATIC_ESSID ||
9110 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9111 IPW_DEBUG_WX("Getting essid: '%s'\n",
9112 escape_essid(priv->essid, priv->essid_len));
9113 memcpy(extra, priv->essid, priv->essid_len);
9114 wrqu->essid.length = priv->essid_len;
9115 wrqu->essid.flags = 1; /* active */
9116 } else {
9117 IPW_DEBUG_WX("Getting essid: ANY\n");
9118 wrqu->essid.length = 0;
9119 wrqu->essid.flags = 0; /* active */
9121 mutex_unlock(&priv->mutex);
9122 return 0;
9125 static int ipw_wx_set_nick(struct net_device *dev,
9126 struct iw_request_info *info,
9127 union iwreq_data *wrqu, char *extra)
9129 struct ipw_priv *priv = ieee80211_priv(dev);
9131 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9132 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9133 return -E2BIG;
9134 mutex_lock(&priv->mutex);
9135 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9136 memset(priv->nick, 0, sizeof(priv->nick));
9137 memcpy(priv->nick, extra, wrqu->data.length);
9138 IPW_DEBUG_TRACE("<<\n");
9139 mutex_unlock(&priv->mutex);
9140 return 0;
9144 static int ipw_wx_get_nick(struct net_device *dev,
9145 struct iw_request_info *info,
9146 union iwreq_data *wrqu, char *extra)
9148 struct ipw_priv *priv = ieee80211_priv(dev);
9149 IPW_DEBUG_WX("Getting nick\n");
9150 mutex_lock(&priv->mutex);
9151 wrqu->data.length = strlen(priv->nick);
9152 memcpy(extra, priv->nick, wrqu->data.length);
9153 wrqu->data.flags = 1; /* active */
9154 mutex_unlock(&priv->mutex);
9155 return 0;
9158 static int ipw_wx_set_sens(struct net_device *dev,
9159 struct iw_request_info *info,
9160 union iwreq_data *wrqu, char *extra)
9162 struct ipw_priv *priv = ieee80211_priv(dev);
9163 int err = 0;
9165 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9166 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9167 mutex_lock(&priv->mutex);
9169 if (wrqu->sens.fixed == 0)
9171 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9172 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9173 goto out;
9175 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9176 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9177 err = -EINVAL;
9178 goto out;
9181 priv->roaming_threshold = wrqu->sens.value;
9182 priv->disassociate_threshold = 3*wrqu->sens.value;
9183 out:
9184 mutex_unlock(&priv->mutex);
9185 return err;
9188 static int ipw_wx_get_sens(struct net_device *dev,
9189 struct iw_request_info *info,
9190 union iwreq_data *wrqu, char *extra)
9192 struct ipw_priv *priv = ieee80211_priv(dev);
9193 mutex_lock(&priv->mutex);
9194 wrqu->sens.fixed = 1;
9195 wrqu->sens.value = priv->roaming_threshold;
9196 mutex_unlock(&priv->mutex);
9198 IPW_DEBUG_WX("GET roaming threshold -> %d \n",
9199 wrqu->sens.value);
9201 return 0;
9204 static int ipw_wx_set_rate(struct net_device *dev,
9205 struct iw_request_info *info,
9206 union iwreq_data *wrqu, char *extra)
9208 /* TODO: We should use semaphores or locks for access to priv */
9209 struct ipw_priv *priv = ieee80211_priv(dev);
9210 u32 target_rate = wrqu->bitrate.value;
9211 u32 fixed, mask;
9213 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9214 /* value = X, fixed = 1 means only rate X */
9215 /* value = X, fixed = 0 means all rates lower than or equal to X */
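/* Illustrative note (not part of the original driver): with the chain of
 * checks below, asking for 11 Mb/s with fixed = 0 ORs in every mask bit up
 * to and including 11 Mb/s (1, 2, 5.5, 6, 9 and 11 Mb/s), while the same
 * request with fixed = 1 sets only the 11 Mb/s CCK bit before jumping to
 * apply. */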
9217 if (target_rate == -1) {
9218 fixed = 0;
9219 mask = IEEE80211_DEFAULT_RATES_MASK;
9220 /* Now we should reassociate */
9221 goto apply;
9224 mask = 0;
9225 fixed = wrqu->bitrate.fixed;
9227 if (target_rate == 1000000 || !fixed)
9228 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9229 if (target_rate == 1000000)
9230 goto apply;
9232 if (target_rate == 2000000 || !fixed)
9233 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9234 if (target_rate == 2000000)
9235 goto apply;
9237 if (target_rate == 5500000 || !fixed)
9238 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9239 if (target_rate == 5500000)
9240 goto apply;
9242 if (target_rate == 6000000 || !fixed)
9243 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9244 if (target_rate == 6000000)
9245 goto apply;
9247 if (target_rate == 9000000 || !fixed)
9248 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9249 if (target_rate == 9000000)
9250 goto apply;
9252 if (target_rate == 11000000 || !fixed)
9253 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9254 if (target_rate == 11000000)
9255 goto apply;
9257 if (target_rate == 12000000 || !fixed)
9258 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9259 if (target_rate == 12000000)
9260 goto apply;
9262 if (target_rate == 18000000 || !fixed)
9263 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9264 if (target_rate == 18000000)
9265 goto apply;
9267 if (target_rate == 24000000 || !fixed)
9268 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9269 if (target_rate == 24000000)
9270 goto apply;
9272 if (target_rate == 36000000 || !fixed)
9273 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9274 if (target_rate == 36000000)
9275 goto apply;
9277 if (target_rate == 48000000 || !fixed)
9278 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9279 if (target_rate == 48000000)
9280 goto apply;
9282 if (target_rate == 54000000 || !fixed)
9283 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9284 if (target_rate == 54000000)
9285 goto apply;
9287 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9288 return -EINVAL;
9290 apply:
9291 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9292 mask, fixed ? "fixed" : "sub-rates");
9293 mutex_lock(&priv->mutex);
9294 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9295 priv->config &= ~CFG_FIXED_RATE;
9296 ipw_set_fixed_rate(priv, priv->ieee->mode);
9297 } else
9298 priv->config |= CFG_FIXED_RATE;
9300 if (priv->rates_mask == mask) {
9301 IPW_DEBUG_WX("Mask set to current mask.\n");
9302 mutex_unlock(&priv->mutex);
9303 return 0;
9306 priv->rates_mask = mask;
9308 /* Network configuration changed -- force [re]association */
9309 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9310 if (!ipw_disassociate(priv))
9311 ipw_associate(priv);
9313 mutex_unlock(&priv->mutex);
9314 return 0;
9317 static int ipw_wx_get_rate(struct net_device *dev,
9318 struct iw_request_info *info,
9319 union iwreq_data *wrqu, char *extra)
9321 struct ipw_priv *priv = ieee80211_priv(dev);
9322 mutex_lock(&priv->mutex);
9323 wrqu->bitrate.value = priv->last_rate;
9324 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9325 mutex_unlock(&priv->mutex);
9326 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9327 return 0;
9330 static int ipw_wx_set_rts(struct net_device *dev,
9331 struct iw_request_info *info,
9332 union iwreq_data *wrqu, char *extra)
9334 struct ipw_priv *priv = ieee80211_priv(dev);
9335 mutex_lock(&priv->mutex);
9336 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9337 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9338 else {
9339 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9340 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9341 mutex_unlock(&priv->mutex);
9342 return -EINVAL;
9344 priv->rts_threshold = wrqu->rts.value;
9347 ipw_send_rts_threshold(priv, priv->rts_threshold);
9348 mutex_unlock(&priv->mutex);
9349 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9350 return 0;
9353 static int ipw_wx_get_rts(struct net_device *dev,
9354 struct iw_request_info *info,
9355 union iwreq_data *wrqu, char *extra)
9357 struct ipw_priv *priv = ieee80211_priv(dev);
9358 mutex_lock(&priv->mutex);
9359 wrqu->rts.value = priv->rts_threshold;
9360 wrqu->rts.fixed = 0; /* no auto select */
9361 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9362 mutex_unlock(&priv->mutex);
9363 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9364 return 0;
9367 static int ipw_wx_set_txpow(struct net_device *dev,
9368 struct iw_request_info *info,
9369 union iwreq_data *wrqu, char *extra)
9371 struct ipw_priv *priv = ieee80211_priv(dev);
9372 int err = 0;
9374 mutex_lock(&priv->mutex);
9375 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9376 err = -EINPROGRESS;
9377 goto out;
9380 if (!wrqu->power.fixed)
9381 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9383 if (wrqu->power.flags != IW_TXPOW_DBM) {
9384 err = -EINVAL;
9385 goto out;
9388 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9389 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9390 err = -EINVAL;
9391 goto out;
9394 priv->tx_power = wrqu->power.value;
9395 err = ipw_set_tx_power(priv);
9396 out:
9397 mutex_unlock(&priv->mutex);
9398 return err;
9401 static int ipw_wx_get_txpow(struct net_device *dev,
9402 struct iw_request_info *info,
9403 union iwreq_data *wrqu, char *extra)
9405 struct ipw_priv *priv = ieee80211_priv(dev);
9406 mutex_lock(&priv->mutex);
9407 wrqu->power.value = priv->tx_power;
9408 wrqu->power.fixed = 1;
9409 wrqu->power.flags = IW_TXPOW_DBM;
9410 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9411 mutex_unlock(&priv->mutex);
9413 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9414 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9416 return 0;
9419 static int ipw_wx_set_frag(struct net_device *dev,
9420 struct iw_request_info *info,
9421 union iwreq_data *wrqu, char *extra)
9423 struct ipw_priv *priv = ieee80211_priv(dev);
9424 mutex_lock(&priv->mutex);
9425 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9426 priv->ieee->fts = DEFAULT_FTS;
9427 else {
9428 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9429 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9430 mutex_unlock(&priv->mutex);
9431 return -EINVAL;
9434 priv->ieee->fts = wrqu->frag.value & ~0x1;
9437 ipw_send_frag_threshold(priv, wrqu->frag.value);
9438 mutex_unlock(&priv->mutex);
9439 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9440 return 0;
9443 static int ipw_wx_get_frag(struct net_device *dev,
9444 struct iw_request_info *info,
9445 union iwreq_data *wrqu, char *extra)
9447 struct ipw_priv *priv = ieee80211_priv(dev);
9448 mutex_lock(&priv->mutex);
9449 wrqu->frag.value = priv->ieee->fts;
9450 wrqu->frag.fixed = 0; /* no auto select */
9451 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9452 mutex_unlock(&priv->mutex);
9453 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9455 return 0;
9458 static int ipw_wx_set_retry(struct net_device *dev,
9459 struct iw_request_info *info,
9460 union iwreq_data *wrqu, char *extra)
9462 struct ipw_priv *priv = ieee80211_priv(dev);
9464 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9465 return -EINVAL;
9467 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9468 return 0;
9470 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9471 return -EINVAL;
9473 mutex_lock(&priv->mutex);
9474 if (wrqu->retry.flags & IW_RETRY_SHORT)
9475 priv->short_retry_limit = (u8) wrqu->retry.value;
9476 else if (wrqu->retry.flags & IW_RETRY_LONG)
9477 priv->long_retry_limit = (u8) wrqu->retry.value;
9478 else {
9479 priv->short_retry_limit = (u8) wrqu->retry.value;
9480 priv->long_retry_limit = (u8) wrqu->retry.value;
9483 ipw_send_retry_limit(priv, priv->short_retry_limit,
9484 priv->long_retry_limit);
9485 mutex_unlock(&priv->mutex);
9486 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9487 priv->short_retry_limit, priv->long_retry_limit);
9488 return 0;
9491 static int ipw_wx_get_retry(struct net_device *dev,
9492 struct iw_request_info *info,
9493 union iwreq_data *wrqu, char *extra)
9495 struct ipw_priv *priv = ieee80211_priv(dev);
9497 mutex_lock(&priv->mutex);
9498 wrqu->retry.disabled = 0;
9500 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9501 mutex_unlock(&priv->mutex);
9502 return -EINVAL;
9505 if (wrqu->retry.flags & IW_RETRY_LONG) {
9506 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9507 wrqu->retry.value = priv->long_retry_limit;
9508 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9509 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9510 wrqu->retry.value = priv->short_retry_limit;
9511 } else {
9512 wrqu->retry.flags = IW_RETRY_LIMIT;
9513 wrqu->retry.value = priv->short_retry_limit;
9515 mutex_unlock(&priv->mutex);
9517 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9519 return 0;
9522 static int ipw_wx_set_scan(struct net_device *dev,
9523 struct iw_request_info *info,
9524 union iwreq_data *wrqu, char *extra)
9526 struct ipw_priv *priv = ieee80211_priv(dev);
9527 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9528 struct delayed_work *work = NULL;
9530 mutex_lock(&priv->mutex);
9532 priv->user_requested_scan = 1;
9534 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9535 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9536 int len = min((int)req->essid_len,
9537 (int)sizeof(priv->direct_scan_ssid));
9538 memcpy(priv->direct_scan_ssid, req->essid, len);
9539 priv->direct_scan_ssid_len = len;
9540 work = &priv->request_direct_scan;
9541 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9542 work = &priv->request_passive_scan;
9544 } else {
9545 /* Normal active broadcast scan */
9546 work = &priv->request_scan;
9549 mutex_unlock(&priv->mutex);
9551 IPW_DEBUG_WX("Start scan\n");
9553 queue_delayed_work(priv->workqueue, work, 0);
9555 return 0;
9558 static int ipw_wx_get_scan(struct net_device *dev,
9559 struct iw_request_info *info,
9560 union iwreq_data *wrqu, char *extra)
9562 struct ipw_priv *priv = ieee80211_priv(dev);
9563 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9566 static int ipw_wx_set_encode(struct net_device *dev,
9567 struct iw_request_info *info,
9568 union iwreq_data *wrqu, char *key)
9570 struct ipw_priv *priv = ieee80211_priv(dev);
9571 int ret;
9572 u32 cap = priv->capability;
9574 mutex_lock(&priv->mutex);
9575 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9577 /* In IBSS mode, we need to notify the firmware to update
9578 * the beacon info after we changed the capability. */
9579 if (cap != priv->capability &&
9580 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9581 priv->status & STATUS_ASSOCIATED)
9582 ipw_disassociate(priv);
9584 mutex_unlock(&priv->mutex);
9585 return ret;
9588 static int ipw_wx_get_encode(struct net_device *dev,
9589 struct iw_request_info *info,
9590 union iwreq_data *wrqu, char *key)
9592 struct ipw_priv *priv = ieee80211_priv(dev);
9593 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9596 static int ipw_wx_set_power(struct net_device *dev,
9597 struct iw_request_info *info,
9598 union iwreq_data *wrqu, char *extra)
9600 struct ipw_priv *priv = ieee80211_priv(dev);
9601 int err;
9602 mutex_lock(&priv->mutex);
9603 if (wrqu->power.disabled) {
9604 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9605 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9606 if (err) {
9607 IPW_DEBUG_WX("failed setting power mode.\n");
9608 mutex_unlock(&priv->mutex);
9609 return err;
9611 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9612 mutex_unlock(&priv->mutex);
9613 return 0;
9616 switch (wrqu->power.flags & IW_POWER_MODE) {
9617 case IW_POWER_ON: /* If not specified */
9618 case IW_POWER_MODE: /* If set all mask */
9619 case IW_POWER_ALL_R: /* If explicitly state all */
9620 break;
9621 default: /* Otherwise we don't support it */
9622 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9623 wrqu->power.flags);
9624 mutex_unlock(&priv->mutex);
9625 return -EOPNOTSUPP;
9628 /* If the user hasn't specified a power management mode yet, default
9629 * to BATTERY */
9630 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9631 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9632 else
9633 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9635 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9636 if (err) {
9637 IPW_DEBUG_WX("failed setting power mode.\n");
9638 mutex_unlock(&priv->mutex);
9639 return err;
9642 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9643 mutex_unlock(&priv->mutex);
9644 return 0;
9647 static int ipw_wx_get_power(struct net_device *dev,
9648 struct iw_request_info *info,
9649 union iwreq_data *wrqu, char *extra)
9651 struct ipw_priv *priv = ieee80211_priv(dev);
9652 mutex_lock(&priv->mutex);
9653 if (!(priv->power_mode & IPW_POWER_ENABLED))
9654 wrqu->power.disabled = 1;
9655 else
9656 wrqu->power.disabled = 0;
9658 mutex_unlock(&priv->mutex);
9659 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9661 return 0;
9664 static int ipw_wx_set_powermode(struct net_device *dev,
9665 struct iw_request_info *info,
9666 union iwreq_data *wrqu, char *extra)
9668 struct ipw_priv *priv = ieee80211_priv(dev);
9669 int mode = *(int *)extra;
9670 int err;
9672 mutex_lock(&priv->mutex);
9673 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9674 mode = IPW_POWER_AC;
9676 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9677 err = ipw_send_power_mode(priv, mode);
9678 if (err) {
9679 IPW_DEBUG_WX("failed setting power mode.\n");
9680 mutex_unlock(&priv->mutex);
9681 return err;
9683 priv->power_mode = IPW_POWER_ENABLED | mode;
9685 mutex_unlock(&priv->mutex);
9686 return 0;
9689 #define MAX_WX_STRING 80
9690 static int ipw_wx_get_powermode(struct net_device *dev,
9691 struct iw_request_info *info,
9692 union iwreq_data *wrqu, char *extra)
9694 struct ipw_priv *priv = ieee80211_priv(dev);
9695 int level = IPW_POWER_LEVEL(priv->power_mode);
9696 char *p = extra;
9698 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9700 switch (level) {
9701 case IPW_POWER_AC:
9702 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9703 break;
9704 case IPW_POWER_BATTERY:
9705 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9706 break;
9707 default:
9708 p += snprintf(p, MAX_WX_STRING - (p - extra),
9709 "(Timeout %dms, Period %dms)",
9710 timeout_duration[level - 1] / 1000,
9711 period_duration[level - 1] / 1000);
9714 if (!(priv->power_mode & IPW_POWER_ENABLED))
9715 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9717 wrqu->data.length = p - extra + 1;
9719 return 0;
9722 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9723 struct iw_request_info *info,
9724 union iwreq_data *wrqu, char *extra)
9726 struct ipw_priv *priv = ieee80211_priv(dev);
9727 int mode = *(int *)extra;
9728 u8 band = 0, modulation = 0;
9730 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9731 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9732 return -EINVAL;
9734 mutex_lock(&priv->mutex);
9735 if (priv->adapter == IPW_2915ABG) {
9736 priv->ieee->abg_true = 1;
9737 if (mode & IEEE_A) {
9738 band |= IEEE80211_52GHZ_BAND;
9739 modulation |= IEEE80211_OFDM_MODULATION;
9740 } else
9741 priv->ieee->abg_true = 0;
9742 } else {
9743 if (mode & IEEE_A) {
9744 IPW_WARNING("Attempt to set 2200BG into "
9745 "802.11a mode\n");
9746 mutex_unlock(&priv->mutex);
9747 return -EINVAL;
9750 priv->ieee->abg_true = 0;
9753 if (mode & IEEE_B) {
9754 band |= IEEE80211_24GHZ_BAND;
9755 modulation |= IEEE80211_CCK_MODULATION;
9756 } else
9757 priv->ieee->abg_true = 0;
9759 if (mode & IEEE_G) {
9760 band |= IEEE80211_24GHZ_BAND;
9761 modulation |= IEEE80211_OFDM_MODULATION;
9762 } else
9763 priv->ieee->abg_true = 0;
9765 priv->ieee->mode = mode;
9766 priv->ieee->freq_band = band;
9767 priv->ieee->modulation = modulation;
9768 init_supported_rates(priv, &priv->rates);
9770 /* Network configuration changed -- force [re]association */
9771 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9772 if (!ipw_disassociate(priv)) {
9773 ipw_send_supported_rates(priv, &priv->rates);
9774 ipw_associate(priv);
9777 /* Update the band LEDs */
9778 ipw_led_band_on(priv);
9780 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9781 mode & IEEE_A ? 'a' : '.',
9782 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9783 mutex_unlock(&priv->mutex);
9784 return 0;
9787 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9788 struct iw_request_info *info,
9789 union iwreq_data *wrqu, char *extra)
9791 struct ipw_priv *priv = ieee80211_priv(dev);
9792 mutex_lock(&priv->mutex);
9793 switch (priv->ieee->mode) {
9794 case IEEE_A:
9795 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9796 break;
9797 case IEEE_B:
9798 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9799 break;
9800 case IEEE_A | IEEE_B:
9801 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9802 break;
9803 case IEEE_G:
9804 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9805 break;
9806 case IEEE_A | IEEE_G:
9807 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9808 break;
9809 case IEEE_B | IEEE_G:
9810 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9811 break;
9812 case IEEE_A | IEEE_B | IEEE_G:
9813 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9814 break;
9815 default:
9816 strncpy(extra, "unknown", MAX_WX_STRING);
9817 break;
9820 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9822 wrqu->data.length = strlen(extra) + 1;
9823 mutex_unlock(&priv->mutex);
9825 return 0;
9828 static int ipw_wx_set_preamble(struct net_device *dev,
9829 struct iw_request_info *info,
9830 union iwreq_data *wrqu, char *extra)
9832 struct ipw_priv *priv = ieee80211_priv(dev);
9833 int mode = *(int *)extra;
9834 mutex_lock(&priv->mutex);
9835 /* Switching from SHORT -> LONG requires a disassociation */
9836 if (mode == 1) {
9837 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9838 priv->config |= CFG_PREAMBLE_LONG;
9840 /* Network configuration changed -- force [re]association */
9841 IPW_DEBUG_ASSOC
9842 ("[re]association triggered due to preamble change.\n");
9843 if (!ipw_disassociate(priv))
9844 ipw_associate(priv);
9846 goto done;
9849 if (mode == 0) {
9850 priv->config &= ~CFG_PREAMBLE_LONG;
9851 goto done;
9853 mutex_unlock(&priv->mutex);
9854 return -EINVAL;
9856 done:
9857 mutex_unlock(&priv->mutex);
9858 return 0;
9861 static int ipw_wx_get_preamble(struct net_device *dev,
9862 struct iw_request_info *info,
9863 union iwreq_data *wrqu, char *extra)
9865 struct ipw_priv *priv = ieee80211_priv(dev);
9866 mutex_lock(&priv->mutex);
9867 if (priv->config & CFG_PREAMBLE_LONG)
9868 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9869 else
9870 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9871 mutex_unlock(&priv->mutex);
9872 return 0;
9875 #ifdef CONFIG_IPW2200_MONITOR
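/* iwpriv "monitor" handler (a descriptive note added here): parms[0] enables
 * (>0) or disables monitor mode and parms[1] selects the channel.  Switching
 * modes changes the net_device type (radiotap or plain 802.11 vs. Ethernet)
 * and queues an adapter restart so the new mode takes effect. */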
9876 static int ipw_wx_set_monitor(struct net_device *dev,
9877 struct iw_request_info *info,
9878 union iwreq_data *wrqu, char *extra)
9880 struct ipw_priv *priv = ieee80211_priv(dev);
9881 int *parms = (int *)extra;
9882 int enable = (parms[0] > 0);
9883 mutex_lock(&priv->mutex);
9884 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9885 if (enable) {
9886 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9887 #ifdef CONFIG_IPW2200_RADIOTAP
9888 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9889 #else
9890 priv->net_dev->type = ARPHRD_IEEE80211;
9891 #endif
9892 queue_work(priv->workqueue, &priv->adapter_restart);
9895 ipw_set_channel(priv, parms[1]);
9896 } else {
9897 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9898 mutex_unlock(&priv->mutex);
9899 return 0;
9901 priv->net_dev->type = ARPHRD_ETHER;
9902 queue_work(priv->workqueue, &priv->adapter_restart);
9904 mutex_unlock(&priv->mutex);
9905 return 0;
9908 #endif /* CONFIG_IPW2200_MONITOR */
9910 static int ipw_wx_reset(struct net_device *dev,
9911 struct iw_request_info *info,
9912 union iwreq_data *wrqu, char *extra)
9914 struct ipw_priv *priv = ieee80211_priv(dev);
9915 IPW_DEBUG_WX("RESET\n");
9916 queue_work(priv->workqueue, &priv->adapter_restart);
9917 return 0;
9920 static int ipw_wx_sw_reset(struct net_device *dev,
9921 struct iw_request_info *info,
9922 union iwreq_data *wrqu, char *extra)
9924 struct ipw_priv *priv = ieee80211_priv(dev);
9925 union iwreq_data wrqu_sec = {
9926 .encoding = {
9927 .flags = IW_ENCODE_DISABLED,
9930 int ret;
9932 IPW_DEBUG_WX("SW_RESET\n");
9934 mutex_lock(&priv->mutex);
9936 ret = ipw_sw_reset(priv, 2);
9937 if (!ret) {
9938 free_firmware();
9939 ipw_adapter_restart(priv);
9942 /* The SW reset bit might have been toggled on by the 'disable'
9943 * module parameter, so take appropriate action */
9944 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9946 mutex_unlock(&priv->mutex);
9947 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9948 mutex_lock(&priv->mutex);
9950 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9951 /* Configuration likely changed -- force [re]association */
9952 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9953 "reset.\n");
9954 if (!ipw_disassociate(priv))
9955 ipw_associate(priv);
9958 mutex_unlock(&priv->mutex);
9960 return 0;
9963 /* Rebase the WE IOCTLs to zero for the handler array */
9964 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9965 static iw_handler ipw_wx_handlers[] = {
9966 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9967 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9968 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9969 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9970 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9971 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9972 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9973 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9974 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9975 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9976 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9977 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9978 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9979 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9980 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9981 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9982 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9983 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9984 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9985 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9986 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9987 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9988 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9989 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9990 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9991 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9992 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9993 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9994 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9995 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9996 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9997 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9998 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9999 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
10000 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
10001 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
10002 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
10003 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
10004 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
10005 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
10006 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
10009 enum {
10010 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10011 IPW_PRIV_GET_POWER,
10012 IPW_PRIV_SET_MODE,
10013 IPW_PRIV_GET_MODE,
10014 IPW_PRIV_SET_PREAMBLE,
10015 IPW_PRIV_GET_PREAMBLE,
10016 IPW_PRIV_RESET,
10017 IPW_PRIV_SW_RESET,
10018 #ifdef CONFIG_IPW2200_MONITOR
10019 IPW_PRIV_SET_MONITOR,
10020 #endif
10023 static struct iw_priv_args ipw_priv_args[] = {
10025 .cmd = IPW_PRIV_SET_POWER,
10026 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10027 .name = "set_power"},
10029 .cmd = IPW_PRIV_GET_POWER,
10030 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10031 .name = "get_power"},
10033 .cmd = IPW_PRIV_SET_MODE,
10034 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10035 .name = "set_mode"},
10037 .cmd = IPW_PRIV_GET_MODE,
10038 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10039 .name = "get_mode"},
10041 .cmd = IPW_PRIV_SET_PREAMBLE,
10042 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10043 .name = "set_preamble"},
10045 .cmd = IPW_PRIV_GET_PREAMBLE,
10046 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10047 .name = "get_preamble"},
10049 IPW_PRIV_RESET,
10050 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10052 IPW_PRIV_SW_RESET,
10053 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10054 #ifdef CONFIG_IPW2200_MONITOR
10056 IPW_PRIV_SET_MONITOR,
10057 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10058 #endif /* CONFIG_IPW2200_MONITOR */
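/* Note: private wireless-extension handlers are dispatched by
 * (cmd - SIOCIWFIRSTPRIV), so this array must stay in the same order as the
 * IPW_PRIV_* enum above. */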
10061 static iw_handler ipw_priv_handler[] = {
10062 ipw_wx_set_powermode,
10063 ipw_wx_get_powermode,
10064 ipw_wx_set_wireless_mode,
10065 ipw_wx_get_wireless_mode,
10066 ipw_wx_set_preamble,
10067 ipw_wx_get_preamble,
10068 ipw_wx_reset,
10069 ipw_wx_sw_reset,
10070 #ifdef CONFIG_IPW2200_MONITOR
10071 ipw_wx_set_monitor,
10072 #endif
10075 static struct iw_handler_def ipw_wx_handler_def = {
10076 .standard = ipw_wx_handlers,
10077 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10078 .num_private = ARRAY_SIZE(ipw_priv_handler),
10079 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10080 .private = ipw_priv_handler,
10081 .private_args = ipw_priv_args,
10082 .get_wireless_stats = ipw_get_wireless_stats,
10086 * Get wireless statistics.
10087 * Called by /proc/net/wireless
10088 * Also called by SIOCGIWSTATS
10090 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10092 struct ipw_priv *priv = ieee80211_priv(dev);
10093 struct iw_statistics *wstats;
10095 wstats = &priv->wstats;
10097 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10098 * netdev->get_wireless_stats seems to be called before fw is
10099 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10100 * and associated; if not associated, the values are all meaningless
10101 * anyway, so set them all to 0 and mark them INVALID */
10102 if (!(priv->status & STATUS_ASSOCIATED)) {
10103 wstats->miss.beacon = 0;
10104 wstats->discard.retries = 0;
10105 wstats->qual.qual = 0;
10106 wstats->qual.level = 0;
10107 wstats->qual.noise = 0;
10108 wstats->qual.updated = 7;
10109 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10110 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10111 return wstats;
10114 wstats->qual.qual = priv->quality;
10115 wstats->qual.level = priv->exp_avg_rssi;
10116 wstats->qual.noise = priv->exp_avg_noise;
10117 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10118 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10120 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10121 wstats->discard.retries = priv->last_tx_failures;
10122 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10124 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10125 goto fail_get_ordinal;
10126 wstats->discard.retries += tx_retry; */
10128 return wstats;
10131 /* net device stuff */
10133 static void init_sys_config(struct ipw_sys_config *sys_config)
10135 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10136 sys_config->bt_coexistence = 0;
10137 sys_config->answer_broadcast_ssid_probe = 0;
10138 sys_config->accept_all_data_frames = 0;
10139 sys_config->accept_non_directed_frames = 1;
10140 sys_config->exclude_unicast_unencrypted = 0;
10141 sys_config->disable_unicast_decryption = 1;
10142 sys_config->exclude_multicast_unencrypted = 0;
10143 sys_config->disable_multicast_decryption = 1;
10144 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10145 antenna = CFG_SYS_ANTENNA_BOTH;
10146 sys_config->antenna_diversity = antenna;
10147 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10148 sys_config->dot11g_auto_detection = 0;
10149 sys_config->enable_cts_to_self = 0;
10150 sys_config->bt_coexist_collision_thr = 0;
10151 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10152 sys_config->silence_threshold = 0x1e;
10155 static int ipw_net_open(struct net_device *dev)
10157 struct ipw_priv *priv = ieee80211_priv(dev);
10158 IPW_DEBUG_INFO("dev->open\n");
10159 /* we should be verifying the device is ready to be opened */
10160 mutex_lock(&priv->mutex);
10161 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10162 (priv->status & STATUS_ASSOCIATED))
10163 netif_start_queue(dev);
10164 mutex_unlock(&priv->mutex);
10165 return 0;
10168 static int ipw_net_stop(struct net_device *dev)
10170 IPW_DEBUG_INFO("dev->close\n");
10171 netif_stop_queue(dev);
10172 return 0;
10176 /* TODO:
10178 * Modify to send one TFD per fragment instead of using chunking; otherwise
10179 * we need to heavily modify ieee80211_skb_to_txb. */
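/* ipw_tx_skb (summary added for readability): build one TFD for an
 * ieee80211_txb.  For IBSS the destination station is looked up (and added if
 * necessary); the 802.11 header is copied into the TFD, tx flags are set from
 * the association request and the configured security level, and up to
 * NUM_TFD_CHUNKS - 2 fragments are DMA-mapped as chunks.  Any remaining
 * fragments are coalesced into a single newly allocated skb (see the
 * reallocation path below). */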
10182 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10183 int pri)
10185 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10186 txb->fragments[0]->data;
10187 int i = 0;
10188 struct tfd_frame *tfd;
10189 #ifdef CONFIG_IPW2200_QOS
10190 int tx_id = ipw_get_tx_queue_number(priv, pri);
10191 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10192 #else
10193 struct clx2_tx_queue *txq = &priv->txq[0];
10194 #endif
10195 struct clx2_queue *q = &txq->q;
10196 u8 id, hdr_len, unicast;
10197 u16 remaining_bytes;
10198 int fc;
10200 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10201 switch (priv->ieee->iw_mode) {
10202 case IW_MODE_ADHOC:
10203 unicast = !is_multicast_ether_addr(hdr->addr1);
10204 id = ipw_find_station(priv, hdr->addr1);
10205 if (id == IPW_INVALID_STATION) {
10206 id = ipw_add_station(priv, hdr->addr1);
10207 if (id == IPW_INVALID_STATION) {
10208 IPW_WARNING("Attempt to send data to "
10209 "invalid cell: " MAC_FMT "\n",
10210 hdr->addr1[0], hdr->addr1[1],
10211 hdr->addr1[2], hdr->addr1[3],
10212 hdr->addr1[4], hdr->addr1[5]);
10213 goto drop;
10216 break;
10218 case IW_MODE_INFRA:
10219 default:
10220 unicast = !is_multicast_ether_addr(hdr->addr3);
10221 id = 0;
10222 break;
10225 tfd = &txq->bd[q->first_empty];
10226 txq->txb[q->first_empty] = txb;
10227 memset(tfd, 0, sizeof(*tfd));
10228 tfd->u.data.station_number = id;
10230 tfd->control_flags.message_type = TX_FRAME_TYPE;
10231 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10233 tfd->u.data.cmd_id = DINO_CMD_TX;
10234 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10235 remaining_bytes = txb->payload_size;
10237 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10238 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10239 else
10240 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10242 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10243 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10245 fc = le16_to_cpu(hdr->frame_ctl);
10246 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10248 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10250 if (likely(unicast))
10251 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10253 if (txb->encrypted && !priv->ieee->host_encrypt) {
10254 switch (priv->ieee->sec.level) {
10255 case SEC_LEVEL_3:
10256 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10257 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10258 /* XXX: ACK flag must be set for CCMP even if it
10259 * is a multicast/broadcast packet, because CCMP
10260 * group communication encrypted by GTK is
10261 * actually done by the AP. */
10262 if (!unicast)
10263 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10265 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10266 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10267 tfd->u.data.key_index = 0;
10268 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10269 break;
10270 case SEC_LEVEL_2:
10271 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10272 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10273 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10274 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10275 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10276 break;
10277 case SEC_LEVEL_1:
10278 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10279 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10280 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10281 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10283 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10284 else
10285 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10286 break;
10287 case SEC_LEVEL_0:
10288 break;
10289 default:
10290 printk(KERN_ERR "Unknown security level %d\n",
10291 priv->ieee->sec.level);
10292 break;
10294 } else
10295 /* No hardware encryption */
10296 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10298 #ifdef CONFIG_IPW2200_QOS
10299 if (fc & IEEE80211_STYPE_QOS_DATA)
10300 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10301 #endif /* CONFIG_IPW2200_QOS */
10303 /* payload */
10304 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10305 txb->nr_frags));
10306 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10307 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10308 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10309 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10310 i, le32_to_cpu(tfd->u.data.num_chunks),
10311 txb->fragments[i]->len - hdr_len);
10312 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10313 i, tfd->u.data.num_chunks,
10314 txb->fragments[i]->len - hdr_len);
10315 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10316 txb->fragments[i]->len - hdr_len);
10318 tfd->u.data.chunk_ptr[i] =
10319 cpu_to_le32(pci_map_single
10320 (priv->pci_dev,
10321 txb->fragments[i]->data + hdr_len,
10322 txb->fragments[i]->len - hdr_len,
10323 PCI_DMA_TODEVICE));
10324 tfd->u.data.chunk_len[i] =
10325 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10328 if (i != txb->nr_frags) {
10329 struct sk_buff *skb;
10330 u16 remaining_bytes = 0;
10331 int j;
10333 for (j = i; j < txb->nr_frags; j++)
10334 remaining_bytes += txb->fragments[j]->len - hdr_len;
10336 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10337 remaining_bytes);
10338 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10339 if (skb != NULL) {
10340 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10341 for (j = i; j < txb->nr_frags; j++) {
10342 int size = txb->fragments[j]->len - hdr_len;
10344 printk(KERN_INFO "Adding frag %d %d...\n",
10345 j, size);
10346 memcpy(skb_put(skb, size),
10347 txb->fragments[j]->data + hdr_len, size);
10349 dev_kfree_skb_any(txb->fragments[i]);
10350 txb->fragments[i] = skb;
10351 tfd->u.data.chunk_ptr[i] =
10352 cpu_to_le32(pci_map_single
10353 (priv->pci_dev, skb->data,
10354 remaining_bytes,
10355 PCI_DMA_TODEVICE));
10357 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10361 /* kick DMA */
10362 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10363 ipw_write32(priv, q->reg_w, q->first_empty);
10365 if (ipw_tx_queue_space(q) < q->high_mark)
10366 netif_stop_queue(priv->net_dev);
10368 return NETDEV_TX_OK;
10370 drop:
10371 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10372 ieee80211_txb_free(txb);
10373 return NETDEV_TX_OK;
10376 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10378 struct ipw_priv *priv = ieee80211_priv(dev);
10379 #ifdef CONFIG_IPW2200_QOS
10380 int tx_id = ipw_get_tx_queue_number(priv, pri);
10381 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10382 #else
10383 struct clx2_tx_queue *txq = &priv->txq[0];
10384 #endif /* CONFIG_IPW2200_QOS */
10386 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10387 return 1;
10389 return 0;
10392 #ifdef CONFIG_IPW2200_PROMISCUOUS
10393 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10394 struct ieee80211_txb *txb)
10396 struct ieee80211_rx_stats dummystats;
10397 struct ieee80211_hdr *hdr;
10398 u8 n;
10399 u16 filter = priv->prom_priv->filter;
10400 int hdr_only = 0;
10402 if (filter & IPW_PROM_NO_TX)
10403 return;
10405 memset(&dummystats, 0, sizeof(dummystats));
10407 /* Filtering of fragment chains is done against the first fragment */
10408 hdr = (void *)txb->fragments[0]->data;
10409 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10410 if (filter & IPW_PROM_NO_MGMT)
10411 return;
10412 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10413 hdr_only = 1;
10414 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10415 if (filter & IPW_PROM_NO_CTL)
10416 return;
10417 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10418 hdr_only = 1;
10419 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10420 if (filter & IPW_PROM_NO_DATA)
10421 return;
10422 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10423 hdr_only = 1;
10426 for(n=0; n<txb->nr_frags; ++n) {
10427 struct sk_buff *src = txb->fragments[n];
10428 struct sk_buff *dst;
10429 struct ieee80211_radiotap_header *rt_hdr;
10430 int len;
10432 if (hdr_only) {
10433 hdr = (void *)src->data;
10434 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10435 } else
10436 len = src->len;
10438 dst = alloc_skb(
10439 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10440 if (!dst) continue;
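/* Build a minimal radiotap header in front of the copied frame: version,
 * pad and length fields plus a single CHANNEL field carrying the center
 * frequency in MHz and the band/modulation flags chosen below. */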
10442 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10444 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10445 rt_hdr->it_pad = 0;
10446 rt_hdr->it_present = 0; /* after all, it's just an idea */
10447 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10449 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10450 ieee80211chan2mhz(priv->channel));
10451 if (priv->channel > 14) /* 802.11a */
10452 *(__le16*)skb_put(dst, sizeof(u16)) =
10453 cpu_to_le16(IEEE80211_CHAN_OFDM |
10454 IEEE80211_CHAN_5GHZ);
10455 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10456 *(__le16*)skb_put(dst, sizeof(u16)) =
10457 cpu_to_le16(IEEE80211_CHAN_CCK |
10458 IEEE80211_CHAN_2GHZ);
10459 else /* 802.11g */
10460 *(__le16*)skb_put(dst, sizeof(u16)) =
10461 cpu_to_le16(IEEE80211_CHAN_OFDM |
10462 IEEE80211_CHAN_2GHZ);
10464 rt_hdr->it_len = cpu_to_le16(dst->len);
10466 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10468 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10469 dev_kfree_skb_any(dst);
10472 #endif
10474 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10475 struct net_device *dev, int pri)
10477 struct ipw_priv *priv = ieee80211_priv(dev);
10478 unsigned long flags;
10479 int ret;
10481 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10482 spin_lock_irqsave(&priv->lock, flags);
10484 if (!(priv->status & STATUS_ASSOCIATED)) {
10485 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10486 priv->ieee->stats.tx_carrier_errors++;
10487 netif_stop_queue(dev);
10488 goto fail_unlock;
10491 #ifdef CONFIG_IPW2200_PROMISCUOUS
10492 if (rtap_iface && netif_running(priv->prom_net_dev))
10493 ipw_handle_promiscuous_tx(priv, txb);
10494 #endif
10496 ret = ipw_tx_skb(priv, txb, pri);
10497 if (ret == NETDEV_TX_OK)
10498 __ipw_led_activity_on(priv);
10499 spin_unlock_irqrestore(&priv->lock, flags);
10501 return ret;
10503 fail_unlock:
10504 spin_unlock_irqrestore(&priv->lock, flags);
10505 return 1;
10508 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10510 struct ipw_priv *priv = ieee80211_priv(dev);
10512 priv->ieee->stats.tx_packets = priv->tx_packets;
10513 priv->ieee->stats.rx_packets = priv->rx_packets;
10514 return &priv->ieee->stats;
10517 static void ipw_net_set_multicast_list(struct net_device *dev)
10522 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10524 struct ipw_priv *priv = ieee80211_priv(dev);
10525 struct sockaddr *addr = p;
10526 DECLARE_MAC_BUF(mac);
10528 if (!is_valid_ether_addr(addr->sa_data))
10529 return -EADDRNOTAVAIL;
10530 mutex_lock(&priv->mutex);
10531 priv->config |= CFG_CUSTOM_MAC;
10532 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10533 printk(KERN_INFO "%s: Setting MAC to %s\n",
10534 priv->net_dev->name, print_mac(mac, priv->mac_addr));
10535 queue_work(priv->workqueue, &priv->adapter_restart);
10536 mutex_unlock(&priv->mutex);
10537 return 0;
10540 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10541 struct ethtool_drvinfo *info)
10543 struct ipw_priv *p = ieee80211_priv(dev);
10544 char vers[64];
10545 char date[32];
10546 u32 len;
10548 strcpy(info->driver, DRV_NAME);
10549 strcpy(info->version, DRV_VERSION);
10551 len = sizeof(vers);
10552 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10553 len = sizeof(date);
10554 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10556 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10557 vers, date);
10558 strcpy(info->bus_info, pci_name(p->pci_dev));
10559 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10562 static u32 ipw_ethtool_get_link(struct net_device *dev)
10564 struct ipw_priv *priv = ieee80211_priv(dev);
10565 return (priv->status & STATUS_ASSOCIATED) != 0;
10568 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10570 return IPW_EEPROM_IMAGE_SIZE;
10573 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10574 struct ethtool_eeprom *eeprom, u8 * bytes)
10576 struct ipw_priv *p = ieee80211_priv(dev);
10578 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10579 return -EINVAL;
10580 mutex_lock(&p->mutex);
10581 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10582 mutex_unlock(&p->mutex);
10583 return 0;
10586 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10587 struct ethtool_eeprom *eeprom, u8 * bytes)
10589 struct ipw_priv *p = ieee80211_priv(dev);
10590 int i;
10592 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10593 return -EINVAL;
10594 mutex_lock(&p->mutex);
10595 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10596 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10597 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10598 mutex_unlock(&p->mutex);
10599 return 0;
10602 static const struct ethtool_ops ipw_ethtool_ops = {
10603 .get_link = ipw_ethtool_get_link,
10604 .get_drvinfo = ipw_ethtool_get_drvinfo,
10605 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10606 .get_eeprom = ipw_ethtool_get_eeprom,
10607 .set_eeprom = ipw_ethtool_set_eeprom,
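/* Interrupt top half: read INTA, bail out early if interrupts are disabled,
 * the hardware has disappeared (all-ones read) or the interrupt belongs to
 * another device sharing the line; otherwise mask further interrupts,
 * acknowledge the pending bits, cache INTA for the tasklet and schedule it. */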
10610 static irqreturn_t ipw_isr(int irq, void *data)
10612 struct ipw_priv *priv = data;
10613 u32 inta, inta_mask;
10615 if (!priv)
10616 return IRQ_NONE;
10618 spin_lock(&priv->irq_lock);
10620 if (!(priv->status & STATUS_INT_ENABLED)) {
10621 /* IRQ is disabled */
10622 goto none;
10625 inta = ipw_read32(priv, IPW_INTA_RW);
10626 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10628 if (inta == 0xFFFFFFFF) {
10629 /* Hardware disappeared */
10630 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10631 goto none;
10634 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10635 /* Shared interrupt */
10636 goto none;
10639 /* tell the device to stop sending interrupts */
10640 __ipw_disable_interrupts(priv);
10642 /* ack current interrupts */
10643 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10644 ipw_write32(priv, IPW_INTA_RW, inta);
10646 /* Cache INTA value for our tasklet */
10647 priv->isr_inta = inta;
10649 tasklet_schedule(&priv->irq_tasklet);
10651 spin_unlock(&priv->irq_lock);
10653 return IRQ_HANDLED;
10654 none:
10655 spin_unlock(&priv->irq_lock);
10656 return IRQ_NONE;
10659 static void ipw_rf_kill(void *adapter)
10661 struct ipw_priv *priv = adapter;
10662 unsigned long flags;
10664 spin_lock_irqsave(&priv->lock, flags);
10666 if (rf_kill_active(priv)) {
10667 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10668 if (priv->workqueue)
10669 queue_delayed_work(priv->workqueue,
10670 &priv->rf_kill, 2 * HZ);
10671 goto exit_unlock;
10674 /* RF Kill is now disabled, so bring the device back up */
10676 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10677 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10678 "device\n");
10680 /* we cannot do an adapter restart while holding the irq spin lock */
10681 queue_work(priv->workqueue, &priv->adapter_restart);
10682 } else
10683 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10684 "enabled\n");
10686 exit_unlock:
10687 spin_unlock_irqrestore(&priv->lock, flags);
10690 static void ipw_bg_rf_kill(struct work_struct *work)
10692 struct ipw_priv *priv =
10693 container_of(work, struct ipw_priv, rf_kill.work);
10694 mutex_lock(&priv->mutex);
10695 ipw_rf_kill(priv);
10696 mutex_unlock(&priv->mutex);
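/* Called once an association completes: bring the carrier up, cancel any
 * pending scan work, reset statistics and refresh the current rate and LEDs,
 * notify wireless extensions, and (if background scanning is enabled)
 * schedule the next scan. */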
10699 static void ipw_link_up(struct ipw_priv *priv)
10701 priv->last_seq_num = -1;
10702 priv->last_frag_num = -1;
10703 priv->last_packet_time = 0;
10705 netif_carrier_on(priv->net_dev);
10706 if (netif_queue_stopped(priv->net_dev)) {
10707 IPW_DEBUG_NOTIF("waking queue\n");
10708 netif_wake_queue(priv->net_dev);
10709 } else {
10710 IPW_DEBUG_NOTIF("starting queue\n");
10711 netif_start_queue(priv->net_dev);
10714 cancel_delayed_work(&priv->request_scan);
10715 cancel_delayed_work(&priv->request_direct_scan);
10716 cancel_delayed_work(&priv->request_passive_scan);
10717 cancel_delayed_work(&priv->scan_event);
10718 ipw_reset_stats(priv);
10719 /* Ensure the rate is updated immediately */
10720 priv->last_rate = ipw_get_current_rate(priv);
10721 ipw_gather_stats(priv);
10722 ipw_led_link_up(priv);
10723 notify_wx_assoc_event(priv);
10725 if (priv->config & CFG_BACKGROUND_SCAN)
10726 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10729 static void ipw_bg_link_up(struct work_struct *work)
10731 struct ipw_priv *priv =
10732 container_of(work, struct ipw_priv, link_up);
10733 mutex_lock(&priv->mutex);
10734 ipw_link_up(priv);
10735 mutex_unlock(&priv->mutex);
10738 static void ipw_link_down(struct ipw_priv *priv)
10740 ipw_led_link_down(priv);
10741 netif_carrier_off(priv->net_dev);
10742 netif_stop_queue(priv->net_dev);
10743 notify_wx_assoc_event(priv);
10745 /* Cancel any queued work ... */
10746 cancel_delayed_work(&priv->request_scan);
10747 cancel_delayed_work(&priv->request_direct_scan);
10748 cancel_delayed_work(&priv->request_passive_scan);
10749 cancel_delayed_work(&priv->adhoc_check);
10750 cancel_delayed_work(&priv->gather_stats);
10752 ipw_reset_stats(priv);
10754 if (!(priv->status & STATUS_EXIT_PENDING)) {
10755 /* Queue up another scan... */
10756 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10757 } else
10758 cancel_delayed_work(&priv->scan_event);
10761 static void ipw_bg_link_down(struct work_struct *work)
10763 struct ipw_priv *priv =
10764 container_of(work, struct ipw_priv, link_down);
10765 mutex_lock(&priv->mutex);
10766 ipw_link_down(priv);
10767 mutex_unlock(&priv->mutex);
10770 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10772 int ret = 0;
10774 priv->workqueue = create_workqueue(DRV_NAME);
10775 init_waitqueue_head(&priv->wait_command_queue);
10776 init_waitqueue_head(&priv->wait_state);
10778 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10779 INIT_WORK(&priv->associate, ipw_bg_associate);
10780 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10781 INIT_WORK(&priv->system_config, ipw_system_config);
10782 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10783 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10784 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10785 INIT_WORK(&priv->up, ipw_bg_up);
10786 INIT_WORK(&priv->down, ipw_bg_down);
10787 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10788 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10789 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10790 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10791 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10792 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10793 INIT_WORK(&priv->roam, ipw_bg_roam);
10794 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10795 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10796 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10797 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10798 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10799 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10800 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10802 #ifdef CONFIG_IPW2200_QOS
10803 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10804 #endif /* CONFIG_IPW2200_QOS */
10806 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10807 ipw_irq_tasklet, (unsigned long)priv);
10809 return ret;
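/* set_security callback registered with the ieee80211 layer: copy any updated
 * keys, active key index, auth mode, privacy/encryption settings and security
 * level into priv->ieee->sec, mirror them into the capability flags, and mark
 * STATUS_SECURITY_UPDATED; hardware crypto keys are reprogrammed when host
 * encryption is not in use. */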
10812 static void shim__set_security(struct net_device *dev,
10813 struct ieee80211_security *sec)
10815 struct ipw_priv *priv = ieee80211_priv(dev);
10816 int i;
10817 for (i = 0; i < 4; i++) {
10818 if (sec->flags & (1 << i)) {
10819 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10820 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10821 if (sec->key_sizes[i] == 0)
10822 priv->ieee->sec.flags &= ~(1 << i);
10823 else {
10824 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10825 sec->key_sizes[i]);
10826 priv->ieee->sec.flags |= (1 << i);
10828 priv->status |= STATUS_SECURITY_UPDATED;
10829 } else if (sec->level != SEC_LEVEL_1)
10830 priv->ieee->sec.flags &= ~(1 << i);
10833 if (sec->flags & SEC_ACTIVE_KEY) {
10834 if (sec->active_key <= 3) {
10835 priv->ieee->sec.active_key = sec->active_key;
10836 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10837 } else
10838 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10839 priv->status |= STATUS_SECURITY_UPDATED;
10840 } else
10841 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10843 if ((sec->flags & SEC_AUTH_MODE) &&
10844 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10845 priv->ieee->sec.auth_mode = sec->auth_mode;
10846 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10847 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10848 priv->capability |= CAP_SHARED_KEY;
10849 else
10850 priv->capability &= ~CAP_SHARED_KEY;
10851 priv->status |= STATUS_SECURITY_UPDATED;
10854 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10855 priv->ieee->sec.flags |= SEC_ENABLED;
10856 priv->ieee->sec.enabled = sec->enabled;
10857 priv->status |= STATUS_SECURITY_UPDATED;
10858 if (sec->enabled)
10859 priv->capability |= CAP_PRIVACY_ON;
10860 else
10861 priv->capability &= ~CAP_PRIVACY_ON;
10864 if (sec->flags & SEC_ENCRYPT)
10865 priv->ieee->sec.encrypt = sec->encrypt;
10867 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10868 priv->ieee->sec.level = sec->level;
10869 priv->ieee->sec.flags |= SEC_LEVEL;
10870 priv->status |= STATUS_SECURITY_UPDATED;
10873 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10874 ipw_set_hwcrypto_keys(priv);
10876 /* To match current functionality of ipw2100 (which works well w/
10877 * various supplicants), we don't force a disassociate if the
10878 * privacy capability changes ... */
10879 #if 0
10880 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10881 (((priv->assoc_request.capability &
10882 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10883 (!(priv->assoc_request.capability &
10884 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10885 IPW_DEBUG_ASSOC("Disassociating due to capability "
10886 "change.\n");
10887 ipw_disassociate(priv);
10889 #endif
10892 static int init_supported_rates(struct ipw_priv *priv,
10893 struct ipw_supported_rates *rates)
10895 /* TODO: Mask out rates based on priv->rates_mask */
10897 memset(rates, 0, sizeof(*rates));
10898 /* configure supported rates */
10899 switch (priv->ieee->freq_band) {
10900 case IEEE80211_52GHZ_BAND:
10901 rates->ieee_mode = IPW_A_MODE;
10902 rates->purpose = IPW_RATE_CAPABILITIES;
10903 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10904 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10905 break;
10907 default: /* Mixed or 2.4Ghz */
10908 rates->ieee_mode = IPW_G_MODE;
10909 rates->purpose = IPW_RATE_CAPABILITIES;
10910 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10911 IEEE80211_CCK_DEFAULT_RATES_MASK);
10912 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10913 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10914 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10916 break;
10919 return 0;
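/* ipw_config (summary): push the basic runtime configuration to the firmware
 * after a (re)load -- tx power, adapter MAC address, system config (including
 * any Bluetooth coexistence and promiscuous settings), supported rates, RTS
 * threshold, QoS parameters and the random seed -- then send the
 * host-complete command for the final transition to the RUN state. */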
10922 static int ipw_config(struct ipw_priv *priv)
10924 /* This is only called from ipw_up, which resets/reloads the firmware,
10925 so we don't need to first disable the card before we configure
10926 it */
10927 if (ipw_set_tx_power(priv))
10928 goto error;
10930 /* initialize adapter address */
10931 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10932 goto error;
10934 /* set basic system config settings */
10935 init_sys_config(&priv->sys_config);
10937 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10938 * Does not support BT priority yet (don't abort or defer our Tx) */
10939 if (bt_coexist) {
10940 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10942 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10943 priv->sys_config.bt_coexistence
10944 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10945 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10946 priv->sys_config.bt_coexistence
10947 |= CFG_BT_COEXISTENCE_OOB;
10950 #ifdef CONFIG_IPW2200_PROMISCUOUS
10951 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10952 priv->sys_config.accept_all_data_frames = 1;
10953 priv->sys_config.accept_non_directed_frames = 1;
10954 priv->sys_config.accept_all_mgmt_bcpr = 1;
10955 priv->sys_config.accept_all_mgmt_frames = 1;
10957 #endif
10959 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10960 priv->sys_config.answer_broadcast_ssid_probe = 1;
10961 else
10962 priv->sys_config.answer_broadcast_ssid_probe = 0;
10964 if (ipw_send_system_config(priv))
10965 goto error;
10967 init_supported_rates(priv, &priv->rates);
10968 if (ipw_send_supported_rates(priv, &priv->rates))
10969 goto error;
10971 /* Set request-to-send threshold */
10972 if (priv->rts_threshold) {
10973 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10974 goto error;
10976 #ifdef CONFIG_IPW2200_QOS
10977 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10978 ipw_qos_activate(priv, NULL);
10979 #endif /* CONFIG_IPW2200_QOS */
10981 if (ipw_set_random_seed(priv))
10982 goto error;
10984 /* final state transition to the RUN state */
10985 if (ipw_send_host_complete(priv))
10986 goto error;
10988 priv->status |= STATUS_INIT;
10990 ipw_led_init(priv);
10991 ipw_led_radio_on(priv);
10992 priv->notif_missed_beacons = 0;
10994 /* Set hardware WEP key if it is configured. */
10995 if ((priv->capability & CAP_PRIVACY_ON) &&
10996 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10997 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10998 ipw_set_hwcrypto_keys(priv);
11000 return 0;
11002 error:
11003 return -EIO;
11007 * NOTE:
11009 * These tables have been tested in conjunction with the
11010 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11012 * Altering these values, using them on other hardware, or using them in
11013 * geographies not intended for resale of the above-mentioned Intel
11014 * adapters has not been tested.
11016 * Remember to update the table in README.ipw2200 when changing this
11017 * table.
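 *
 * Table layout: each entry starts with the 3-character country/SKU code that
 * is matched against the EEPROM country code, followed by the permitted
 * 2.4 GHz (.bg) and 5 GHz (.a) channels, each given as {center frequency in
 * MHz, channel number, optional restriction flags such as
 * IEEE80211_CH_PASSIVE_ONLY}. */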
11020 static const struct ieee80211_geo ipw_geos[] = {
11021 { /* Restricted */
11022 "---",
11023 .bg_channels = 11,
11024 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11025 {2427, 4}, {2432, 5}, {2437, 6},
11026 {2442, 7}, {2447, 8}, {2452, 9},
11027 {2457, 10}, {2462, 11}},
11030 { /* Custom US/Canada */
11031 "ZZF",
11032 .bg_channels = 11,
11033 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11034 {2427, 4}, {2432, 5}, {2437, 6},
11035 {2442, 7}, {2447, 8}, {2452, 9},
11036 {2457, 10}, {2462, 11}},
11037 .a_channels = 8,
11038 .a = {{5180, 36},
11039 {5200, 40},
11040 {5220, 44},
11041 {5240, 48},
11042 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11043 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11044 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11045 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
11048 { /* Rest of World */
11049 "ZZD",
11050 .bg_channels = 13,
11051 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11052 {2427, 4}, {2432, 5}, {2437, 6},
11053 {2442, 7}, {2447, 8}, {2452, 9},
11054 {2457, 10}, {2462, 11}, {2467, 12},
11055 {2472, 13}},
11058 { /* Custom USA & Europe & High */
11059 "ZZA",
11060 .bg_channels = 11,
11061 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11062 {2427, 4}, {2432, 5}, {2437, 6},
11063 {2442, 7}, {2447, 8}, {2452, 9},
11064 {2457, 10}, {2462, 11}},
11065 .a_channels = 13,
11066 .a = {{5180, 36},
11067 {5200, 40},
11068 {5220, 44},
11069 {5240, 48},
11070 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11071 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11072 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11073 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11074 {5745, 149},
11075 {5765, 153},
11076 {5785, 157},
11077 {5805, 161},
11078 {5825, 165}},
11081 { /* Custom NA & Europe */
11082 "ZZB",
11083 .bg_channels = 11,
11084 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11085 {2427, 4}, {2432, 5}, {2437, 6},
11086 {2442, 7}, {2447, 8}, {2452, 9},
11087 {2457, 10}, {2462, 11}},
11088 .a_channels = 13,
11089 .a = {{5180, 36},
11090 {5200, 40},
11091 {5220, 44},
11092 {5240, 48},
11093 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11094 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11095 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11096 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11097 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11098 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11099 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11100 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11101 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11104 { /* Custom Japan */
11105 "ZZC",
11106 .bg_channels = 11,
11107 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11108 {2427, 4}, {2432, 5}, {2437, 6},
11109 {2442, 7}, {2447, 8}, {2452, 9},
11110 {2457, 10}, {2462, 11}},
11111 .a_channels = 4,
11112 .a = {{5170, 34}, {5190, 38},
11113 {5210, 42}, {5230, 46}},
11116 { /* Custom */
11117 "ZZM",
11118 .bg_channels = 11,
11119 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11120 {2427, 4}, {2432, 5}, {2437, 6},
11121 {2442, 7}, {2447, 8}, {2452, 9},
11122 {2457, 10}, {2462, 11}},
11125 { /* Europe */
11126 "ZZE",
11127 .bg_channels = 13,
11128 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11129 {2427, 4}, {2432, 5}, {2437, 6},
11130 {2442, 7}, {2447, 8}, {2452, 9},
11131 {2457, 10}, {2462, 11}, {2467, 12},
11132 {2472, 13}},
11133 .a_channels = 19,
11134 .a = {{5180, 36},
11135 {5200, 40},
11136 {5220, 44},
11137 {5240, 48},
11138 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11139 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11140 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11141 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11142 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11143 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11144 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11145 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11146 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11147 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11148 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11149 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11150 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11151 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11152 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11155 { /* Custom Japan */
11156 "ZZJ",
11157 .bg_channels = 14,
11158 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11159 {2427, 4}, {2432, 5}, {2437, 6},
11160 {2442, 7}, {2447, 8}, {2452, 9},
11161 {2457, 10}, {2462, 11}, {2467, 12},
11162 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11163 .a_channels = 4,
11164 .a = {{5170, 34}, {5190, 38},
11165 {5210, 42}, {5230, 46}},
11168 { /* Rest of World */
11169 "ZZR",
11170 .bg_channels = 14,
11171 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11172 {2427, 4}, {2432, 5}, {2437, 6},
11173 {2442, 7}, {2447, 8}, {2452, 9},
11174 {2457, 10}, {2462, 11}, {2467, 12},
11175 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11176 IEEE80211_CH_PASSIVE_ONLY}},
11179 { /* High Band */
11180 "ZZH",
11181 .bg_channels = 13,
11182 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11183 {2427, 4}, {2432, 5}, {2437, 6},
11184 {2442, 7}, {2447, 8}, {2452, 9},
11185 {2457, 10}, {2462, 11},
11186 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11187 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11188 .a_channels = 4,
11189 .a = {{5745, 149}, {5765, 153},
11190 {5785, 157}, {5805, 161}},
11193 { /* Custom Europe */
11194 "ZZG",
11195 .bg_channels = 13,
11196 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11197 {2427, 4}, {2432, 5}, {2437, 6},
11198 {2442, 7}, {2447, 8}, {2452, 9},
11199 {2457, 10}, {2462, 11},
11200 {2467, 12}, {2472, 13}},
11201 .a_channels = 4,
11202 .a = {{5180, 36}, {5200, 40},
11203 {5220, 44}, {5240, 48}},
11206 { /* Europe */
11207 "ZZK",
11208 .bg_channels = 13,
11209 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11210 {2427, 4}, {2432, 5}, {2437, 6},
11211 {2442, 7}, {2447, 8}, {2452, 9},
11212 {2457, 10}, {2462, 11},
11213 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11214 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11215 .a_channels = 24,
11216 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11217 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11218 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11219 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11220 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11221 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11222 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11223 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11224 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11225 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11226 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11227 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11228 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11229 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11230 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11231 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11232 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11233 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11234 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11235 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11236 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11237 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11238 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11239 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11242 { /* Europe */
11243 "ZZL",
11244 .bg_channels = 11,
11245 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11246 {2427, 4}, {2432, 5}, {2437, 6},
11247 {2442, 7}, {2447, 8}, {2452, 9},
11248 {2457, 10}, {2462, 11}},
11249 .a_channels = 13,
11250 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11251 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11252 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11253 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11254 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11255 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11256 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11257 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11258 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11259 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11260 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11261 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11262 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11266 #define MAX_HW_RESTARTS 5
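/* ipw_up: load the firmware and bring the adapter to a configured state.
 * The MAC address and country code are read from the EEPROM (unless a custom
 * MAC was set), the matching geography table is selected, RF-kill state is
 * honored, and configuration is retried up to MAX_HW_RESTARTS times before
 * giving up. */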
11267 static int ipw_up(struct ipw_priv *priv)
11269 int rc, i, j;
11271 if (priv->status & STATUS_EXIT_PENDING)
11272 return -EIO;
11274 if (cmdlog && !priv->cmdlog) {
11275 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11276 GFP_KERNEL);
11277 if (priv->cmdlog == NULL) {
11278 IPW_ERROR("Error allocating %d command log entries.\n",
11279 cmdlog);
11280 return -ENOMEM;
11281 } else {
11282 priv->cmdlog_len = cmdlog;
11286 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11287 /* Load the microcode, firmware, and eeprom.
11288 * Also start the clocks. */
11289 rc = ipw_load(priv);
11290 if (rc) {
11291 IPW_ERROR("Unable to load firmware: %d\n", rc);
11292 return rc;
11295 ipw_init_ordinals(priv);
11296 if (!(priv->config & CFG_CUSTOM_MAC))
11297 eeprom_parse_mac(priv, priv->mac_addr);
11298 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11300 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11301 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11302 ipw_geos[j].name, 3))
11303 break;
11305 if (j == ARRAY_SIZE(ipw_geos)) {
11306 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11307 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11308 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11309 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11310 j = 0;
11312 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11313 IPW_WARNING("Could not set geography.");
11314 return 0;
11317 if (priv->status & STATUS_RF_KILL_SW) {
11318 IPW_WARNING("Radio disabled by module parameter.\n");
11319 return 0;
11320 } else if (rf_kill_active(priv)) {
11321 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11322 "Kill switch must be turned off for "
11323 "wireless networking to work.\n");
11324 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11325 2 * HZ);
11326 return 0;
11329 rc = ipw_config(priv);
11330 if (!rc) {
11331 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11333 /* If configured to try to auto-associate, kick
11334 * off a scan. */
11335 queue_delayed_work(priv->workqueue,
11336 &priv->request_scan, 0);
11338 return 0;
11341 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11342 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11343 i, MAX_HW_RESTARTS);
11345 /* We had an error bringing up the hardware, so take it
11346 * all the way back down so we can try again */
11347 ipw_down(priv);
11350 /* tried to restart and configure the device for as long as our
11351 * patience could withstand */
11352 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11354 return -EIO;
11357 static void ipw_bg_up(struct work_struct *work)
11359 struct ipw_priv *priv =
11360 container_of(work, struct ipw_priv, up);
11361 mutex_lock(&priv->mutex);
11362 ipw_up(priv);
11363 mutex_unlock(&priv->mutex);
11366 static void ipw_deinit(struct ipw_priv *priv)
11368 int i;
11370 if (priv->status & STATUS_SCANNING) {
11371 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11372 ipw_abort_scan(priv);
11375 if (priv->status & STATUS_ASSOCIATED) {
11376 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11377 ipw_disassociate(priv);
11380 ipw_led_shutdown(priv);
11382 /* Wait up to 1s for status to change to not scanning and not
11383 * associated (disassociation can take a while for a full 802.11
11384 * exchange) */
11385 for (i = 1000; i && (priv->status &
11386 (STATUS_DISASSOCIATING |
11387 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11388 udelay(10);
11390 if (priv->status & (STATUS_DISASSOCIATING |
11391 STATUS_ASSOCIATED | STATUS_SCANNING))
11392 IPW_DEBUG_INFO("Still associated or scanning...\n");
11393 else
11394 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11396 /* Attempt to disable the card */
11397 ipw_send_card_disable(priv, 0);
11399 priv->status &= ~STATUS_INIT;
11402 static void ipw_down(struct ipw_priv *priv)
11404 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11406 priv->status |= STATUS_EXIT_PENDING;
11408 if (ipw_is_init(priv))
11409 ipw_deinit(priv);
11411 /* Wipe out the EXIT_PENDING status bit if we are not actually
11412 * exiting the module */
11413 if (!exit_pending)
11414 priv->status &= ~STATUS_EXIT_PENDING;
11416 /* tell the device to stop sending interrupts */
11417 ipw_disable_interrupts(priv);
11419 /* Clear all bits but the RF Kill */
11420 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11421 netif_carrier_off(priv->net_dev);
11422 netif_stop_queue(priv->net_dev);
11424 ipw_stop_nic(priv);
11426 ipw_led_radio_off(priv);
11429 static void ipw_bg_down(struct work_struct *work)
11431 struct ipw_priv *priv =
11432 container_of(work, struct ipw_priv, down);
11433 mutex_lock(&priv->mutex);
11434 ipw_down(priv);
11435 mutex_unlock(&priv->mutex);
11438 /* Called by register_netdev() */
11439 static int ipw_net_init(struct net_device *dev)
11441 struct ipw_priv *priv = ieee80211_priv(dev);
11442 mutex_lock(&priv->mutex);
11444 if (ipw_up(priv)) {
11445 mutex_unlock(&priv->mutex);
11446 return -EIO;
11449 mutex_unlock(&priv->mutex);
11450 return 0;
11453 /* PCI driver stuff */
11454 static struct pci_device_id card_ids[] = {
11455 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11456 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11457 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11458 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11459 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11460 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11461 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11462 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11463 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11464 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11465 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11466 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11467 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11468 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11469 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11470 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11471 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11472 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11473 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11474 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11475 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11476 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11478 /* required last entry */
11479 {0,}
11482 MODULE_DEVICE_TABLE(pci, card_ids);
11484 static struct attribute *ipw_sysfs_entries[] = {
11485 &dev_attr_rf_kill.attr,
11486 &dev_attr_direct_dword.attr,
11487 &dev_attr_indirect_byte.attr,
11488 &dev_attr_indirect_dword.attr,
11489 &dev_attr_mem_gpio_reg.attr,
11490 &dev_attr_command_event_reg.attr,
11491 &dev_attr_nic_type.attr,
11492 &dev_attr_status.attr,
11493 &dev_attr_cfg.attr,
11494 &dev_attr_error.attr,
11495 &dev_attr_event_log.attr,
11496 &dev_attr_cmd_log.attr,
11497 &dev_attr_eeprom_delay.attr,
11498 &dev_attr_ucode_version.attr,
11499 &dev_attr_rtc.attr,
11500 &dev_attr_scan_age.attr,
11501 &dev_attr_led.attr,
11502 &dev_attr_speed_scan.attr,
11503 &dev_attr_net_stats.attr,
11504 &dev_attr_channels.attr,
11505 #ifdef CONFIG_IPW2200_PROMISCUOUS
11506 &dev_attr_rtap_iface.attr,
11507 &dev_attr_rtap_filter.attr,
11508 #endif
11509 NULL
11512 static struct attribute_group ipw_attribute_group = {
11513 .name = NULL, /* put in device directory */
11514 .attrs = ipw_sysfs_entries,
11517 #ifdef CONFIG_IPW2200_PROMISCUOUS
11518 static int ipw_prom_open(struct net_device *dev)
11520 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11521 struct ipw_priv *priv = prom_priv->priv;
11523 IPW_DEBUG_INFO("prom dev->open\n");
11524 netif_carrier_off(dev);
11525 netif_stop_queue(dev);
11527 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11528 priv->sys_config.accept_all_data_frames = 1;
11529 priv->sys_config.accept_non_directed_frames = 1;
11530 priv->sys_config.accept_all_mgmt_bcpr = 1;
11531 priv->sys_config.accept_all_mgmt_frames = 1;
11533 ipw_send_system_config(priv);
11536 return 0;
11539 static int ipw_prom_stop(struct net_device *dev)
11541 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11542 struct ipw_priv *priv = prom_priv->priv;
11544 IPW_DEBUG_INFO("prom dev->stop\n");
11546 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11547 priv->sys_config.accept_all_data_frames = 0;
11548 priv->sys_config.accept_non_directed_frames = 0;
11549 priv->sys_config.accept_all_mgmt_bcpr = 0;
11550 priv->sys_config.accept_all_mgmt_frames = 0;
11552 ipw_send_system_config(priv);
11555 return 0;
11558 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11560 IPW_DEBUG_INFO("prom dev->xmit\n");
11561 netif_stop_queue(dev);
11562 return -EOPNOTSUPP;
11565 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11567 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11568 return &prom_priv->ieee->stats;
11571 static int ipw_prom_alloc(struct ipw_priv *priv)
11573 int rc = 0;
11575 if (priv->prom_net_dev)
11576 return -EPERM;
11578 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11579 if (priv->prom_net_dev == NULL)
11580 return -ENOMEM;
11582 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11583 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11584 priv->prom_priv->priv = priv;
11586 strcpy(priv->prom_net_dev->name, "rtap%d");
11587 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11589 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11590 priv->prom_net_dev->open = ipw_prom_open;
11591 priv->prom_net_dev->stop = ipw_prom_stop;
11592 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11593 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11595 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11596 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11598 rc = register_netdev(priv->prom_net_dev);
11599 if (rc) {
11600 free_ieee80211(priv->prom_net_dev);
11601 priv->prom_net_dev = NULL;
11602 return rc;
11605 return 0;
11608 static void ipw_prom_free(struct ipw_priv *priv)
11610 if (!priv->prom_net_dev)
11611 return;
11613 unregister_netdev(priv->prom_net_dev);
11614 free_ieee80211(priv->prom_net_dev);
11616 priv->prom_net_dev = NULL;
11619 #endif
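/* PCI probe: allocate the ieee80211 device, set up locks, deferred work and
 * the 32-bit DMA mask, map BAR 0, hook up the wireless-extension, ethtool and
 * netdev callbacks, register the net device and, when requested, the
 * promiscuous rtap interface. */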
static int __devinit ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif	/* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

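	/* Error unwind: each label below releases the resources acquired
	 * before the corresponding failure point, in reverse order. */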
      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}

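/* PCI remove: stop the adapter, unregister the net_device and release
 * every resource acquired in ipw_pci_probe(), in reverse order. */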
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueues, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
	free_firmware();
}

#ifdef CONFIG_PM
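/* Power management: power the adapter down and detach the net_device
 * before the system enters the requested sleep state. */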
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

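/* Resume: restore PCI state, re-apply the RETRY_TIMEOUT workaround and
 * schedule the deferred "up" work to bring the adapter back online. */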
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif

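/* Shutdown callback: take the adapter down and disable the PCI device
 * before the system powers off or reboots. */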
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

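/* Module entry point: register the PCI driver and expose the
 * debug_level attribute in sysfs. */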
static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

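/* Module parameters; all are read-only (0444) once the module is loaded. */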
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif	/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");

module_exit(ipw_exit);
module_init(ipw_init);