[linux-2.6.22.y-op.git] drivers/net/wireless/ipw2200.c
1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
34 #include <linux/version.h>
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
73 #define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
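/* Illustrative note: with every option above enabled in an in-tree build the
 * version string expands to "1.1.4kdmprq"; an external-module (KBUILD_EXTMOD)
 * build with none of the CONFIG_IPW2200_* options selected reports plain
 * "1.1.4". */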
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99 'a', 'b', 'g', '?'
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
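/* Reading aid: in each ieee80211_qos_parameters initializer below, the five
 * rows appear to hold, per TX queue 0-3, the cw_min, cw_max, AIFS, ACM-flag
 * and TXOP-limit values, as the QOS_TXn_* / DEF_TXn_* macro names suggest. */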
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
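/* qos_oui above is the 00:50:F2 OUI (Microsoft, as used in WMM/WME information
 * elements). The table below maps an 802.1d priority (array index 0-7) to one
 * of the four hardware TX queues. */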
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
184 static void ipw_tx_queue_free(struct ipw_priv *);
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(void *);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(void *);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
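/* Debug hex-dump helpers. snprint_line() formats up to 16 bytes as a single
 * line of the form "<offset>  XX XX ... XX   <ascii>", e.g. (illustrative):
 *   00000010  DE AD BE EF 00 01 02 03  41 42 43 44 45 46 47 48   ........ ABCDEFGH
 * printk_buf() and snprintk_buf() below emit such lines to the kernel log or
 * to a caller-supplied buffer, respectively. */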
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
202 int out, i, j, l;
203 char c;
205 out = snprintf(buf, count, "%08X", ofs);
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
224 out += snprintf(buf + out, count - out, "%c", c);
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
231 return out;
234 static void printk_buf(int level, const u8 * data, u32 len)
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
266 return total;
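/* Register/SRAM access overview (summary of the helpers that follow): the
 * first 4K of the device's register space is memory-mapped and accessed
 * directly through readb/readw/readl and writeb/writew/writel on hw_base;
 * anything above 4K is reached indirectly by writing the target address to
 * IPW_INDIRECT_ADDR (or IPW_AUTOINC_ADDR for streaming reads/writes) and then
 * accessing IPW_INDIRECT_DATA / IPW_AUTOINC_DATA. */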
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308 #define ipw_write8(ipw, ofs, val) \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val)
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
374 _ipw_read_indirect(a, b, c, d);
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
430 u32 value;
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
451 if (num <= 0) {
452 return;
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
488 if (num <= 0) {
489 return;
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
519 memcpy_toio((priv->hw_base + addr), buf, num);
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
552 unsigned long flags;
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
561 unsigned long flags;
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 static char *ipw_error_desc(u32 val)
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
609 u32 i;
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
635 static inline int ipw_is_init(struct ipw_priv *priv)
637 return (priv->status & STATUS_INIT) ? 1 : 0;
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
642 u32 addr, field_info, field_len, field_count, total_len;
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
660 * TABLE 0: Direct access to a table of 32 bit values
662 * This is a very simple table with the data directly
663 * read from the table
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer than "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
691 case IPW_ORD_TABLE_1_MASK:
693 * TABLE 1: Indirect access to a table of 32 bit values
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
721 case IPW_ORD_TABLE_2_MASK:
723 * TABLE 2: Indirect access to a table of variable sized values
725 * This table consists of six entries, each containing
726 * - a dword with the starting offset of the data
727 * - a dword with the length in the first 16 bits
728 * and the count in the second 16 bits
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
743 /* get the second dword of the statistics entry;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
756 /* abort if not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
763 *len = total_len;
764 if (!total_len)
765 return 0;
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
779 return 0;
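/* Typical use of ipw_get_ordinal() (illustrative sketch, mirroring the sysfs
 * handlers further below):
 *
 *   u32 tmp = 0, len = sizeof(u32);
 *   if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len) == 0)
 *           ... len now holds the number of bytes written into tmp ...
 */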
782 static void ipw_init_ordinals(struct ipw_priv *priv)
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
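/* Strip the start-standby bit and the DMA clock-gate bits from a value read
 * back from IPW_EVENT_REG, so that re-writing the register (as the LED
 * helpers below do) does not re-assert them. */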
805 static u32 ipw_register_toggle(u32 reg)
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that need to be on during startup
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
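/* With the scheduling in ipw_led_link_on()/ipw_led_link_off() below, an
 * unassociated interface blinks its link LED roughly 300 ms on / 2700 ms off,
 * i.e. a 3 second period with a ~10% duty cycle. */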
830 static void ipw_led_link_on(struct ipw_priv *priv)
832 unsigned long flags;
833 u32 led;
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
840 spin_lock_irqsave(&priv->lock, flags);
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
848 led = ipw_register_toggle(led);
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
853 priv->status |= STATUS_LED_LINK_ON;
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
862 spin_unlock_irqrestore(&priv->lock, flags);
865 static void ipw_bg_led_link_on(void *data)
867 struct ipw_priv *priv = data;
868 mutex_lock(&priv->mutex);
869 ipw_led_link_on(data);
870 mutex_unlock(&priv->mutex);
873 static void ipw_led_link_off(struct ipw_priv *priv)
875 unsigned long flags;
876 u32 led;
878 /* If configured not to use LEDs, or nic type is 1,
879 * then we don't toggle the LINK led. */
880 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
881 return;
883 spin_lock_irqsave(&priv->lock, flags);
885 if (priv->status & STATUS_LED_LINK_ON) {
886 led = ipw_read_reg32(priv, IPW_EVENT_REG);
887 led &= priv->led_association_off;
888 led = ipw_register_toggle(led);
890 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
891 ipw_write_reg32(priv, IPW_EVENT_REG, led);
893 IPW_DEBUG_LED("Link LED Off\n");
895 priv->status &= ~STATUS_LED_LINK_ON;
897 /* If we aren't associated and the radio is on, schedule
898 * turning the LED on (blink while unassociated) */
899 if (!(priv->status & STATUS_RF_KILL_MASK) &&
900 !(priv->status & STATUS_ASSOCIATED))
901 queue_delayed_work(priv->workqueue, &priv->led_link_on,
902 LD_TIME_LINK_OFF);
906 spin_unlock_irqrestore(&priv->lock, flags);
909 static void ipw_bg_led_link_off(void *data)
911 struct ipw_priv *priv = data;
912 mutex_lock(&priv->mutex);
913 ipw_led_link_off(data);
914 mutex_unlock(&priv->mutex);
917 static void __ipw_led_activity_on(struct ipw_priv *priv)
919 u32 led;
921 if (priv->config & CFG_NO_LED)
922 return;
924 if (priv->status & STATUS_RF_KILL_MASK)
925 return;
927 if (!(priv->status & STATUS_LED_ACT_ON)) {
928 led = ipw_read_reg32(priv, IPW_EVENT_REG);
929 led |= priv->led_activity_on;
931 led = ipw_register_toggle(led);
933 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
934 ipw_write_reg32(priv, IPW_EVENT_REG, led);
936 IPW_DEBUG_LED("Activity LED On\n");
938 priv->status |= STATUS_LED_ACT_ON;
940 cancel_delayed_work(&priv->led_act_off);
941 queue_delayed_work(priv->workqueue, &priv->led_act_off,
942 LD_TIME_ACT_ON);
943 } else {
944 /* Reschedule LED off for full time period */
945 cancel_delayed_work(&priv->led_act_off);
946 queue_delayed_work(priv->workqueue, &priv->led_act_off,
947 LD_TIME_ACT_ON);
951 #if 0
952 void ipw_led_activity_on(struct ipw_priv *priv)
954 unsigned long flags;
955 spin_lock_irqsave(&priv->lock, flags);
956 __ipw_led_activity_on(priv);
957 spin_unlock_irqrestore(&priv->lock, flags);
959 #endif /* 0 */
961 static void ipw_led_activity_off(struct ipw_priv *priv)
963 unsigned long flags;
964 u32 led;
966 if (priv->config & CFG_NO_LED)
967 return;
969 spin_lock_irqsave(&priv->lock, flags);
971 if (priv->status & STATUS_LED_ACT_ON) {
972 led = ipw_read_reg32(priv, IPW_EVENT_REG);
973 led &= priv->led_activity_off;
975 led = ipw_register_toggle(led);
977 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
978 ipw_write_reg32(priv, IPW_EVENT_REG, led);
980 IPW_DEBUG_LED("Activity LED Off\n");
982 priv->status &= ~STATUS_LED_ACT_ON;
985 spin_unlock_irqrestore(&priv->lock, flags);
988 static void ipw_bg_led_activity_off(void *data)
990 struct ipw_priv *priv = data;
991 mutex_lock(&priv->mutex);
992 ipw_led_activity_off(data);
993 mutex_unlock(&priv->mutex);
996 static void ipw_led_band_on(struct ipw_priv *priv)
998 unsigned long flags;
999 u32 led;
1001 /* Only nic type 1 supports mode LEDs */
1002 if (priv->config & CFG_NO_LED ||
1003 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1004 return;
1006 spin_lock_irqsave(&priv->lock, flags);
1008 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1009 if (priv->assoc_network->mode == IEEE_A) {
1010 led |= priv->led_ofdm_on;
1011 led &= priv->led_association_off;
1012 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1013 } else if (priv->assoc_network->mode == IEEE_G) {
1014 led |= priv->led_ofdm_on;
1015 led |= priv->led_association_on;
1016 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1017 } else {
1018 led &= priv->led_ofdm_off;
1019 led |= priv->led_association_on;
1020 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1023 led = ipw_register_toggle(led);
1025 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1026 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028 spin_unlock_irqrestore(&priv->lock, flags);
1031 static void ipw_led_band_off(struct ipw_priv *priv)
1033 unsigned long flags;
1034 u32 led;
1036 /* Only nic type 1 supports mode LEDs */
1037 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1038 return;
1040 spin_lock_irqsave(&priv->lock, flags);
1042 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1043 led &= priv->led_ofdm_off;
1044 led &= priv->led_association_off;
1046 led = ipw_register_toggle(led);
1048 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1049 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1051 spin_unlock_irqrestore(&priv->lock, flags);
1054 static void ipw_led_radio_on(struct ipw_priv *priv)
1056 ipw_led_link_on(priv);
1059 static void ipw_led_radio_off(struct ipw_priv *priv)
1061 ipw_led_activity_off(priv);
1062 ipw_led_link_off(priv);
1065 static void ipw_led_link_up(struct ipw_priv *priv)
1067 /* Set the Link Led on for all nic types */
1068 ipw_led_link_on(priv);
1071 static void ipw_led_link_down(struct ipw_priv *priv)
1073 ipw_led_activity_off(priv);
1074 ipw_led_link_off(priv);
1076 if (priv->status & STATUS_RF_KILL_MASK)
1077 ipw_led_radio_off(priv);
1080 static void ipw_led_init(struct ipw_priv *priv)
1082 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1084 /* Set the default PINs for the link and activity leds */
1085 priv->led_activity_on = IPW_ACTIVITY_LED;
1086 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1088 priv->led_association_on = IPW_ASSOCIATED_LED;
1089 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1091 /* Set the default PINs for the OFDM leds */
1092 priv->led_ofdm_on = IPW_OFDM_LED;
1093 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1095 switch (priv->nic_type) {
1096 case EEPROM_NIC_TYPE_1:
1097 /* In this NIC type, the LEDs are reversed.... */
1098 priv->led_activity_on = IPW_ASSOCIATED_LED;
1099 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1100 priv->led_association_on = IPW_ACTIVITY_LED;
1101 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1103 if (!(priv->config & CFG_NO_LED))
1104 ipw_led_band_on(priv);
1106 /* And we don't blink link LEDs for this nic, so
1107 * just return here */
1108 return;
1110 case EEPROM_NIC_TYPE_3:
1111 case EEPROM_NIC_TYPE_2:
1112 case EEPROM_NIC_TYPE_4:
1113 case EEPROM_NIC_TYPE_0:
1114 break;
1116 default:
1117 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1118 priv->nic_type);
1119 priv->nic_type = EEPROM_NIC_TYPE_0;
1120 break;
1123 if (!(priv->config & CFG_NO_LED)) {
1124 if (priv->status & STATUS_ASSOCIATED)
1125 ipw_led_link_on(priv);
1126 else
1127 ipw_led_link_off(priv);
1131 static void ipw_led_shutdown(struct ipw_priv *priv)
1133 ipw_led_activity_off(priv);
1134 ipw_led_link_off(priv);
1135 ipw_led_band_off(priv);
1136 cancel_delayed_work(&priv->led_link_on);
1137 cancel_delayed_work(&priv->led_link_off);
1138 cancel_delayed_work(&priv->led_act_off);
1142 * The following adds a new attribute to the sysfs representation
1143 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1144 * used for controlling the debug level.
1146 * See the level definitions in ipw for details.
1148 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1150 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1153 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1154 size_t count)
1156 char *p = (char *)buf;
1157 u32 val;
1159 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1160 p++;
1161 if (p[0] == 'x' || p[0] == 'X')
1162 p++;
1163 val = simple_strtoul(p, &p, 16);
1164 } else
1165 val = simple_strtoul(p, &p, 10);
1166 if (p == buf)
1167 printk(KERN_INFO DRV_NAME
1168 ": %s is not in hex or decimal form.\n", buf);
1169 else
1170 ipw_debug_level = val;
1172 return strnlen(buf, count);
1175 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1176 show_debug_level, store_debug_level);
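/* Usage sketch (path as per the comment above; the exact directory name is
 * the driver name registered with the PCI core):
 *   # cat /sys/bus/pci/drivers/ipw/debug_level
 *   # echo 0x<bitmask> > /sys/bus/pci/drivers/ipw/debug_level   (hex or decimal)
 */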
1178 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1180 /* length = 1st dword in log */
1181 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1184 static void ipw_capture_event_log(struct ipw_priv *priv,
1185 u32 log_len, struct ipw_event *log)
1187 u32 base;
1189 if (log_len) {
1190 base = ipw_read32(priv, IPW_EVENT_LOG);
1191 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1192 (u8 *) log, sizeof(*log) * log_len);
1196 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1198 struct ipw_fw_error *error;
1199 u32 log_len = ipw_get_event_log_len(priv);
1200 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1201 u32 elem_len = ipw_read_reg32(priv, base);
1203 error = kmalloc(sizeof(*error) +
1204 sizeof(*error->elem) * elem_len +
1205 sizeof(*error->log) * log_len, GFP_ATOMIC);
1206 if (!error) {
1207 IPW_ERROR("Memory allocation for firmware error log "
1208 "failed.\n");
1209 return NULL;
1211 error->jiffies = jiffies;
1212 error->status = priv->status;
1213 error->config = priv->config;
1214 error->elem_len = elem_len;
1215 error->log_len = log_len;
1216 error->elem = (struct ipw_error_elem *)error->payload;
1217 error->log = (struct ipw_event *)(error->elem + elem_len);
1219 ipw_capture_event_log(priv, log_len, error->log);
1221 if (elem_len)
1222 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1223 sizeof(*error->elem) * elem_len);
1225 return error;
1228 static ssize_t show_event_log(struct device *d,
1229 struct device_attribute *attr, char *buf)
1231 struct ipw_priv *priv = dev_get_drvdata(d);
1232 u32 log_len = ipw_get_event_log_len(priv);
1233 struct ipw_event log[log_len];
1234 u32 len = 0, i;
1236 ipw_capture_event_log(priv, log_len, log);
1238 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1239 for (i = 0; i < log_len; i++)
1240 len += snprintf(buf + len, PAGE_SIZE - len,
1241 "\n%08X%08X%08X",
1242 log[i].time, log[i].event, log[i].data);
1243 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1244 return len;
1247 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1249 static ssize_t show_error(struct device *d,
1250 struct device_attribute *attr, char *buf)
1252 struct ipw_priv *priv = dev_get_drvdata(d);
1253 u32 len = 0, i;
1254 if (!priv->error)
1255 return 0;
1256 len += snprintf(buf + len, PAGE_SIZE - len,
1257 "%08lX%08X%08X%08X",
1258 priv->error->jiffies,
1259 priv->error->status,
1260 priv->error->config, priv->error->elem_len);
1261 for (i = 0; i < priv->error->elem_len; i++)
1262 len += snprintf(buf + len, PAGE_SIZE - len,
1263 "\n%08X%08X%08X%08X%08X%08X%08X",
1264 priv->error->elem[i].time,
1265 priv->error->elem[i].desc,
1266 priv->error->elem[i].blink1,
1267 priv->error->elem[i].blink2,
1268 priv->error->elem[i].link1,
1269 priv->error->elem[i].link2,
1270 priv->error->elem[i].data);
1272 len += snprintf(buf + len, PAGE_SIZE - len,
1273 "\n%08X", priv->error->log_len);
1274 for (i = 0; i < priv->error->log_len; i++)
1275 len += snprintf(buf + len, PAGE_SIZE - len,
1276 "\n%08X%08X%08X",
1277 priv->error->log[i].time,
1278 priv->error->log[i].event,
1279 priv->error->log[i].data);
1280 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1281 return len;
1284 static ssize_t clear_error(struct device *d,
1285 struct device_attribute *attr,
1286 const char *buf, size_t count)
1288 struct ipw_priv *priv = dev_get_drvdata(d);
1290 kfree(priv->error);
1291 priv->error = NULL;
1292 return count;
1295 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1297 static ssize_t show_cmd_log(struct device *d,
1298 struct device_attribute *attr, char *buf)
1300 struct ipw_priv *priv = dev_get_drvdata(d);
1301 u32 len = 0, i;
1302 if (!priv->cmdlog)
1303 return 0;
1304 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1305 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1306 i = (i + 1) % priv->cmdlog_len) {
1307 len +=
1308 snprintf(buf + len, PAGE_SIZE - len,
1309 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1310 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1311 priv->cmdlog[i].cmd.len);
1312 len +=
1313 snprintk_buf(buf + len, PAGE_SIZE - len,
1314 (u8 *) priv->cmdlog[i].cmd.param,
1315 priv->cmdlog[i].cmd.len);
1316 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1318 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1319 return len;
1322 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1324 #ifdef CONFIG_IPW2200_PROMISCUOUS
1325 static void ipw_prom_free(struct ipw_priv *priv);
1326 static int ipw_prom_alloc(struct ipw_priv *priv);
1327 static ssize_t store_rtap_iface(struct device *d,
1328 struct device_attribute *attr,
1329 const char *buf, size_t count)
1331 struct ipw_priv *priv = dev_get_drvdata(d);
1332 int rc = 0;
1334 if (count < 1)
1335 return -EINVAL;
1337 switch (buf[0]) {
1338 case '0':
1339 if (!rtap_iface)
1340 return count;
1342 if (netif_running(priv->prom_net_dev)) {
1343 IPW_WARNING("Interface is up. Cannot unregister.\n");
1344 return count;
1347 ipw_prom_free(priv);
1348 rtap_iface = 0;
1349 break;
1351 case '1':
1352 if (rtap_iface)
1353 return count;
1355 rc = ipw_prom_alloc(priv);
1356 if (!rc)
1357 rtap_iface = 1;
1358 break;
1360 default:
1361 return -EINVAL;
1364 if (rc) {
1365 IPW_ERROR("Failed to register promiscuous network "
1366 "device (error %d).\n", rc);
1369 return count;
1372 static ssize_t show_rtap_iface(struct device *d,
1373 struct device_attribute *attr,
1374 char *buf)
1376 struct ipw_priv *priv = dev_get_drvdata(d);
1377 if (rtap_iface)
1378 return sprintf(buf, "%s", priv->prom_net_dev->name);
1379 else {
1380 buf[0] = '-';
1381 buf[1] = '1';
1382 buf[2] = '\0';
1383 return 3;
1387 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1388 store_rtap_iface);
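/* Usage sketch: writing '1' to the per-device rtap_iface attribute creates
 * the promiscuous/radiotap network interface (ipw_prom_alloc), writing '0'
 * tears it down again, and reading it returns the interface name, or "-1"
 * when no rtap interface exists. */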
1390 static ssize_t store_rtap_filter(struct device *d,
1391 struct device_attribute *attr,
1392 const char *buf, size_t count)
1394 struct ipw_priv *priv = dev_get_drvdata(d);
1396 if (!priv->prom_priv) {
1397 IPW_ERROR("Attempting to set filter without "
1398 "rtap_iface enabled.\n");
1399 return -EPERM;
1402 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1404 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1405 BIT_ARG16(priv->prom_priv->filter));
1407 return count;
1410 static ssize_t show_rtap_filter(struct device *d,
1411 struct device_attribute *attr,
1412 char *buf)
1414 struct ipw_priv *priv = dev_get_drvdata(d);
1415 return sprintf(buf, "0x%04X",
1416 priv->prom_priv ? priv->prom_priv->filter : 0);
1419 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1420 store_rtap_filter);
1421 #endif
1423 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1424 char *buf)
1426 struct ipw_priv *priv = dev_get_drvdata(d);
1427 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1430 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1431 const char *buf, size_t count)
1433 struct ipw_priv *priv = dev_get_drvdata(d);
1434 struct net_device *dev = priv->net_dev;
1435 char buffer[] = "00000000";
1436 unsigned long len =
1437 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1438 unsigned long val;
1439 char *p = buffer;
1441 IPW_DEBUG_INFO("enter\n");
1443 strncpy(buffer, buf, len);
1444 buffer[len] = 0;
1446 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1447 p++;
1448 if (p[0] == 'x' || p[0] == 'X')
1449 p++;
1450 val = simple_strtoul(p, &p, 16);
1451 } else
1452 val = simple_strtoul(p, &p, 10);
1453 if (p == buffer) {
1454 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1455 } else {
1456 priv->ieee->scan_age = val;
1457 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1460 IPW_DEBUG_INFO("exit\n");
1461 return len;
1464 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1466 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1467 char *buf)
1469 struct ipw_priv *priv = dev_get_drvdata(d);
1470 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1473 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1474 const char *buf, size_t count)
1476 struct ipw_priv *priv = dev_get_drvdata(d);
1478 IPW_DEBUG_INFO("enter\n");
1480 if (count == 0)
1481 return 0;
1483 if (*buf == 0) {
1484 IPW_DEBUG_LED("Disabling LED control.\n");
1485 priv->config |= CFG_NO_LED;
1486 ipw_led_shutdown(priv);
1487 } else {
1488 IPW_DEBUG_LED("Enabling LED control.\n");
1489 priv->config &= ~CFG_NO_LED;
1490 ipw_led_init(priv);
1493 IPW_DEBUG_INFO("exit\n");
1494 return count;
1497 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1499 static ssize_t show_status(struct device *d,
1500 struct device_attribute *attr, char *buf)
1502 struct ipw_priv *p = d->driver_data;
1503 return sprintf(buf, "0x%08x\n", (int)p->status);
1506 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1508 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1509 char *buf)
1511 struct ipw_priv *p = d->driver_data;
1512 return sprintf(buf, "0x%08x\n", (int)p->config);
1515 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1517 static ssize_t show_nic_type(struct device *d,
1518 struct device_attribute *attr, char *buf)
1520 struct ipw_priv *priv = d->driver_data;
1521 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1524 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1526 static ssize_t show_ucode_version(struct device *d,
1527 struct device_attribute *attr, char *buf)
1529 u32 len = sizeof(u32), tmp = 0;
1530 struct ipw_priv *p = d->driver_data;
1532 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1533 return 0;
1535 return sprintf(buf, "0x%08x\n", tmp);
1538 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1540 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1541 char *buf)
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1547 return 0;
1549 return sprintf(buf, "0x%08x\n", tmp);
1552 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1555 * Add a device attribute to view/control the delay between eeprom
1556 * operations.
1558 static ssize_t show_eeprom_delay(struct device *d,
1559 struct device_attribute *attr, char *buf)
1561 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1562 return sprintf(buf, "%i\n", n);
1564 static ssize_t store_eeprom_delay(struct device *d,
1565 struct device_attribute *attr,
1566 const char *buf, size_t count)
1568 struct ipw_priv *p = d->driver_data;
1569 sscanf(buf, "%i", &p->eeprom_delay);
1570 return strnlen(buf, count);
1573 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1574 show_eeprom_delay, store_eeprom_delay);
1576 static ssize_t show_command_event_reg(struct device *d,
1577 struct device_attribute *attr, char *buf)
1579 u32 reg = 0;
1580 struct ipw_priv *p = d->driver_data;
1582 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1583 return sprintf(buf, "0x%08x\n", reg);
1585 static ssize_t store_command_event_reg(struct device *d,
1586 struct device_attribute *attr,
1587 const char *buf, size_t count)
1589 u32 reg;
1590 struct ipw_priv *p = d->driver_data;
1592 sscanf(buf, "%x", &reg);
1593 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1594 return strnlen(buf, count);
1597 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1598 show_command_event_reg, store_command_event_reg);
1600 static ssize_t show_mem_gpio_reg(struct device *d,
1601 struct device_attribute *attr, char *buf)
1603 u32 reg = 0;
1604 struct ipw_priv *p = d->driver_data;
1606 reg = ipw_read_reg32(p, 0x301100);
1607 return sprintf(buf, "0x%08x\n", reg);
1609 static ssize_t store_mem_gpio_reg(struct device *d,
1610 struct device_attribute *attr,
1611 const char *buf, size_t count)
1613 u32 reg;
1614 struct ipw_priv *p = d->driver_data;
1616 sscanf(buf, "%x", &reg);
1617 ipw_write_reg32(p, 0x301100, reg);
1618 return strnlen(buf, count);
1621 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1622 show_mem_gpio_reg, store_mem_gpio_reg);
1624 static ssize_t show_indirect_dword(struct device *d,
1625 struct device_attribute *attr, char *buf)
1627 u32 reg = 0;
1628 struct ipw_priv *priv = d->driver_data;
1630 if (priv->status & STATUS_INDIRECT_DWORD)
1631 reg = ipw_read_reg32(priv, priv->indirect_dword);
1632 else
1633 reg = 0;
1635 return sprintf(buf, "0x%08x\n", reg);
1637 static ssize_t store_indirect_dword(struct device *d,
1638 struct device_attribute *attr,
1639 const char *buf, size_t count)
1641 struct ipw_priv *priv = d->driver_data;
1643 sscanf(buf, "%x", &priv->indirect_dword);
1644 priv->status |= STATUS_INDIRECT_DWORD;
1645 return strnlen(buf, count);
1648 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1649 show_indirect_dword, store_indirect_dword);
1651 static ssize_t show_indirect_byte(struct device *d,
1652 struct device_attribute *attr, char *buf)
1654 u8 reg = 0;
1655 struct ipw_priv *priv = d->driver_data;
1657 if (priv->status & STATUS_INDIRECT_BYTE)
1658 reg = ipw_read_reg8(priv, priv->indirect_byte);
1659 else
1660 reg = 0;
1662 return sprintf(buf, "0x%02x\n", reg);
1664 static ssize_t store_indirect_byte(struct device *d,
1665 struct device_attribute *attr,
1666 const char *buf, size_t count)
1668 struct ipw_priv *priv = d->driver_data;
1670 sscanf(buf, "%x", &priv->indirect_byte);
1671 priv->status |= STATUS_INDIRECT_BYTE;
1672 return strnlen(buf, count);
1675 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1676 show_indirect_byte, store_indirect_byte);
1678 static ssize_t show_direct_dword(struct device *d,
1679 struct device_attribute *attr, char *buf)
1681 u32 reg = 0;
1682 struct ipw_priv *priv = d->driver_data;
1684 if (priv->status & STATUS_DIRECT_DWORD)
1685 reg = ipw_read32(priv, priv->direct_dword);
1686 else
1687 reg = 0;
1689 return sprintf(buf, "0x%08x\n", reg);
1691 static ssize_t store_direct_dword(struct device *d,
1692 struct device_attribute *attr,
1693 const char *buf, size_t count)
1695 struct ipw_priv *priv = d->driver_data;
1697 sscanf(buf, "%x", &priv->direct_dword);
1698 priv->status |= STATUS_DIRECT_DWORD;
1699 return strnlen(buf, count);
1702 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1703 show_direct_dword, store_direct_dword);
1705 static int rf_kill_active(struct ipw_priv *priv)
1707 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1708 priv->status |= STATUS_RF_KILL_HW;
1709 else
1710 priv->status &= ~STATUS_RF_KILL_HW;
1712 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1715 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1716 char *buf)
1718 /* 0 - RF kill not enabled
1719 1 - SW based RF kill active (sysfs)
1720 2 - HW based RF kill active
1721 3 - Both HW and SW based RF kill active */
1722 struct ipw_priv *priv = d->driver_data;
1723 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1724 (rf_kill_active(priv) ? 0x2 : 0x0);
1725 return sprintf(buf, "%i\n", val);
1728 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1730 if ((disable_radio ? 1 : 0) ==
1731 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1732 return 0;
1734 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1735 disable_radio ? "OFF" : "ON");
1737 if (disable_radio) {
1738 priv->status |= STATUS_RF_KILL_SW;
1740 if (priv->workqueue)
1741 cancel_delayed_work(&priv->request_scan);
1742 queue_work(priv->workqueue, &priv->down);
1743 } else {
1744 priv->status &= ~STATUS_RF_KILL_SW;
1745 if (rf_kill_active(priv)) {
1746 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1747 "disabled by HW switch\n");
1748 /* Make sure the RF_KILL check timer is running */
1749 cancel_delayed_work(&priv->rf_kill);
1750 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1751 2 * HZ);
1752 } else
1753 queue_work(priv->workqueue, &priv->up);
1756 return 1;
1759 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1760 const char *buf, size_t count)
1762 struct ipw_priv *priv = d->driver_data;
1764 ipw_radio_kill_sw(priv, buf[0] == '1');
1766 return count;
1769 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
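/* Usage sketch: reading rf_kill returns the bitmask documented above (0-3);
 * writing '1' asserts the software RF kill (the radio is taken down), while
 * any other first character clears it, subject to the hardware kill switch. */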
1771 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1772 char *buf)
1774 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1775 int pos = 0, len = 0;
1776 if (priv->config & CFG_SPEED_SCAN) {
1777 while (priv->speed_scan[pos] != 0)
1778 len += sprintf(&buf[len], "%d ",
1779 priv->speed_scan[pos++]);
1780 return len + sprintf(&buf[len], "\n");
1783 return sprintf(buf, "0\n");
1786 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1787 const char *buf, size_t count)
1789 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1790 int channel, pos = 0;
1791 const char *p = buf;
1793 /* list of space separated channels to scan, optionally ending with 0 */
1794 while ((channel = simple_strtol(p, NULL, 0))) {
1795 if (pos == MAX_SPEED_SCAN - 1) {
1796 priv->speed_scan[pos] = 0;
1797 break;
1800 if (ieee80211_is_valid_channel(priv->ieee, channel))
1801 priv->speed_scan[pos++] = channel;
1802 else
1803 IPW_WARNING("Skipping invalid channel request: %d\n",
1804 channel);
1805 p = strchr(p, ' ');
1806 if (!p)
1807 break;
1808 while (*p == ' ' || *p == '\t')
1809 p++;
1812 if (pos == 0)
1813 priv->config &= ~CFG_SPEED_SCAN;
1814 else {
1815 priv->speed_scan_pos = 0;
1816 priv->config |= CFG_SPEED_SCAN;
1819 return count;
1822 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1823 store_speed_scan);
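/* Usage sketch: a space-separated channel list such as "1 6 11" written to
 * speed_scan restricts scanning to those channels (invalid channels are
 * skipped with a warning); writing "0" clears the list and disables
 * CFG_SPEED_SCAN. */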
1825 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1826 char *buf)
1828 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1829 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1832 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1833 const char *buf, size_t count)
1835 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1836 if (buf[0] == '1')
1837 priv->config |= CFG_NET_STATS;
1838 else
1839 priv->config &= ~CFG_NET_STATS;
1841 return count;
1844 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1845 show_net_stats, store_net_stats);
1847 static void notify_wx_assoc_event(struct ipw_priv *priv)
1849 union iwreq_data wrqu;
1850 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1851 if (priv->status & STATUS_ASSOCIATED)
1852 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1853 else
1854 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1855 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1858 static void ipw_irq_tasklet(struct ipw_priv *priv)
1860 u32 inta, inta_mask, handled = 0;
1861 unsigned long flags;
1862 int rc = 0;
1864 spin_lock_irqsave(&priv->irq_lock, flags);
1866 inta = ipw_read32(priv, IPW_INTA_RW);
1867 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1868 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1870 /* Add any cached INTA values that need to be handled */
1871 inta |= priv->isr_inta;
1873 spin_unlock_irqrestore(&priv->irq_lock, flags);
1875 spin_lock_irqsave(&priv->lock, flags);
1877 /* handle each of the possible causes of the interrupt */
1878 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1879 ipw_rx(priv);
1880 handled |= IPW_INTA_BIT_RX_TRANSFER;
1883 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1884 IPW_DEBUG_HC("Command completed.\n");
1885 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1886 priv->status &= ~STATUS_HCMD_ACTIVE;
1887 wake_up_interruptible(&priv->wait_command_queue);
1888 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1891 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1892 IPW_DEBUG_TX("TX_QUEUE_1\n");
1893 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1894 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1897 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1898 IPW_DEBUG_TX("TX_QUEUE_2\n");
1899 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1900 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1903 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1904 IPW_DEBUG_TX("TX_QUEUE_3\n");
1905 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1906 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1909 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1910 IPW_DEBUG_TX("TX_QUEUE_4\n");
1911 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1912 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1915 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1916 IPW_WARNING("STATUS_CHANGE\n");
1917 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1920 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1921 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1922 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1925 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1926 IPW_WARNING("HOST_CMD_DONE\n");
1927 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1930 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1931 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1932 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1935 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1936 IPW_WARNING("PHY_OFF_DONE\n");
1937 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1940 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1941 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1942 priv->status |= STATUS_RF_KILL_HW;
1943 wake_up_interruptible(&priv->wait_command_queue);
1944 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1945 cancel_delayed_work(&priv->request_scan);
1946 schedule_work(&priv->link_down);
1947 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1948 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1951 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1952 IPW_WARNING("Firmware error detected. Restarting.\n");
1953 if (priv->error) {
1954 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1955 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1956 struct ipw_fw_error *error =
1957 ipw_alloc_error_log(priv);
1958 ipw_dump_error_log(priv, error);
1959 kfree(error);
1961 } else {
1962 priv->error = ipw_alloc_error_log(priv);
1963 if (priv->error)
1964 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1965 else
1966 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1967 "log.\n");
1968 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1969 ipw_dump_error_log(priv, priv->error);
1972 /* XXX: If hardware encryption is for WPA/WPA2,
1973 * we have to notify the supplicant. */
1974 if (priv->ieee->sec.encrypt) {
1975 priv->status &= ~STATUS_ASSOCIATED;
1976 notify_wx_assoc_event(priv);
1979 /* Keep the restart process from trying to send host
1980 * commands by clearing the INIT status bit */
1981 priv->status &= ~STATUS_INIT;
1983 /* Cancel currently queued command. */
1984 priv->status &= ~STATUS_HCMD_ACTIVE;
1985 wake_up_interruptible(&priv->wait_command_queue);
1987 queue_work(priv->workqueue, &priv->adapter_restart);
1988 handled |= IPW_INTA_BIT_FATAL_ERROR;
1991 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1992 IPW_ERROR("Parity error\n");
1993 handled |= IPW_INTA_BIT_PARITY_ERROR;
1996 if (handled != inta) {
1997 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2000 spin_unlock_irqrestore(&priv->lock, flags);
2002 /* enable all interrupts */
2003 ipw_enable_interrupts(priv);
2006 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2007 static char *get_cmd_string(u8 cmd)
2009 switch (cmd) {
2010 IPW_CMD(HOST_COMPLETE);
2011 IPW_CMD(POWER_DOWN);
2012 IPW_CMD(SYSTEM_CONFIG);
2013 IPW_CMD(MULTICAST_ADDRESS);
2014 IPW_CMD(SSID);
2015 IPW_CMD(ADAPTER_ADDRESS);
2016 IPW_CMD(PORT_TYPE);
2017 IPW_CMD(RTS_THRESHOLD);
2018 IPW_CMD(FRAG_THRESHOLD);
2019 IPW_CMD(POWER_MODE);
2020 IPW_CMD(WEP_KEY);
2021 IPW_CMD(TGI_TX_KEY);
2022 IPW_CMD(SCAN_REQUEST);
2023 IPW_CMD(SCAN_REQUEST_EXT);
2024 IPW_CMD(ASSOCIATE);
2025 IPW_CMD(SUPPORTED_RATES);
2026 IPW_CMD(SCAN_ABORT);
2027 IPW_CMD(TX_FLUSH);
2028 IPW_CMD(QOS_PARAMETERS);
2029 IPW_CMD(DINO_CONFIG);
2030 IPW_CMD(RSN_CAPABILITIES);
2031 IPW_CMD(RX_KEY);
2032 IPW_CMD(CARD_DISABLE);
2033 IPW_CMD(SEED_NUMBER);
2034 IPW_CMD(TX_POWER);
2035 IPW_CMD(COUNTRY_INFO);
2036 IPW_CMD(AIRONET_INFO);
2037 IPW_CMD(AP_TX_POWER);
2038 IPW_CMD(CCKM_INFO);
2039 IPW_CMD(CCX_VER_INFO);
2040 IPW_CMD(SET_CALIBRATION);
2041 IPW_CMD(SENSITIVITY_CALIB);
2042 IPW_CMD(RETRY_LIMIT);
2043 IPW_CMD(IPW_PRE_POWER_DOWN);
2044 IPW_CMD(VAP_BEACON_TEMPLATE);
2045 IPW_CMD(VAP_DTIM_PERIOD);
2046 IPW_CMD(EXT_SUPPORTED_RATES);
2047 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2048 IPW_CMD(VAP_QUIET_INTERVALS);
2049 IPW_CMD(VAP_CHANNEL_SWITCH);
2050 IPW_CMD(VAP_MANDATORY_CHANNELS);
2051 IPW_CMD(VAP_CELL_PWR_LIMIT);
2052 IPW_CMD(VAP_CF_PARAM_SET);
2053 IPW_CMD(VAP_SET_BEACONING_STATE);
2054 IPW_CMD(MEASUREMENT);
2055 IPW_CMD(POWER_CAPABILITY);
2056 IPW_CMD(SUPPORTED_CHANNELS);
2057 IPW_CMD(TPC_REPORT);
2058 IPW_CMD(WME_INFO);
2059 IPW_CMD(PRODUCTION_COMMAND);
2060 default:
2061 return "UNKNOWN";
2065 #define HOST_COMPLETE_TIMEOUT HZ
2067 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2069 int rc = 0;
2070 unsigned long flags;
2072 spin_lock_irqsave(&priv->lock, flags);
2073 if (priv->status & STATUS_HCMD_ACTIVE) {
2074 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2075 get_cmd_string(cmd->cmd));
2076 spin_unlock_irqrestore(&priv->lock, flags);
2077 return -EAGAIN;
2080 priv->status |= STATUS_HCMD_ACTIVE;
2082 if (priv->cmdlog) {
2083 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2084 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2085 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2086 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2087 cmd->len);
2088 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2091 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2092 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2093 priv->status);
2095 #ifndef DEBUG_CMD_WEP_KEY
2096 if (cmd->cmd == IPW_CMD_WEP_KEY)
2097 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2098 else
2099 #endif
2100 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2102 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2103 if (rc) {
2104 priv->status &= ~STATUS_HCMD_ACTIVE;
2105 IPW_ERROR("Failed to send %s: Reason %d\n",
2106 get_cmd_string(cmd->cmd), rc);
2107 spin_unlock_irqrestore(&priv->lock, flags);
2108 goto exit;
2110 spin_unlock_irqrestore(&priv->lock, flags);
2112 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2113 !(priv->
2114 status & STATUS_HCMD_ACTIVE),
2115 HOST_COMPLETE_TIMEOUT);
2116 if (rc == 0) {
2117 spin_lock_irqsave(&priv->lock, flags);
2118 if (priv->status & STATUS_HCMD_ACTIVE) {
2119 IPW_ERROR("Failed to send %s: Command timed out.\n",
2120 get_cmd_string(cmd->cmd));
2121 priv->status &= ~STATUS_HCMD_ACTIVE;
2122 spin_unlock_irqrestore(&priv->lock, flags);
2123 rc = -EIO;
2124 goto exit;
2126 spin_unlock_irqrestore(&priv->lock, flags);
2127 } else
2128 rc = 0;
2130 if (priv->status & STATUS_RF_KILL_HW) {
2131 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2132 get_cmd_string(cmd->cmd));
2133 rc = -EIO;
2134 goto exit;
2137 exit:
2138 if (priv->cmdlog) {
2139 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2140 priv->cmdlog_pos %= priv->cmdlog_len;
2142 return rc;
2145 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2147 struct host_cmd cmd = {
2148 .cmd = command,
2151 return __ipw_send_cmd(priv, &cmd);
2154 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2155 void *data)
2157 struct host_cmd cmd = {
2158 .cmd = command,
2159 .len = len,
2160 .param = data,
2163 return __ipw_send_cmd(priv, &cmd);
2166 static int ipw_send_host_complete(struct ipw_priv *priv)
2168 if (!priv) {
2169 IPW_ERROR("Invalid args\n");
2170 return -1;
2173 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2176 static int ipw_send_system_config(struct ipw_priv *priv)
2178 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2179 sizeof(priv->sys_config),
2180 &priv->sys_config);
2183 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2185 if (!priv || !ssid) {
2186 IPW_ERROR("Invalid args\n");
2187 return -1;
2190 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2191 ssid);
2194 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2196 if (!priv || !mac) {
2197 IPW_ERROR("Invalid args\n");
2198 return -1;
2201 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2202 priv->net_dev->name, MAC_ARG(mac));
2204 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2208 * NOTE: This must be executed from our workqueue as it results in udelay
2209 * being called, which may corrupt the keyboard if executed on the default
2210 * workqueue
2212 static void ipw_adapter_restart(void *adapter)
2214 struct ipw_priv *priv = adapter;
2216 if (priv->status & STATUS_RF_KILL_MASK)
2217 return;
2219 ipw_down(priv);
2221 if (priv->assoc_network &&
2222 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2223 ipw_remove_current_network(priv);
2225 if (ipw_up(priv)) {
2226 IPW_ERROR("Failed to up device\n");
2227 return;
2231 static void ipw_bg_adapter_restart(void *data)
2233 struct ipw_priv *priv = data;
2234 mutex_lock(&priv->mutex);
2235 ipw_adapter_restart(data);
2236 mutex_unlock(&priv->mutex);
2239 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2241 static void ipw_scan_check(void *data)
2243 struct ipw_priv *priv = data;
2244 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2245 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2246 "adapter after (%dms).\n",
2247 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2248 queue_work(priv->workqueue, &priv->adapter_restart);
2252 static void ipw_bg_scan_check(void *data)
2254 struct ipw_priv *priv = data;
2255 mutex_lock(&priv->mutex);
2256 ipw_scan_check(data);
2257 mutex_unlock(&priv->mutex);
2260 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2261 struct ipw_scan_request_ext *request)
2263 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2264 sizeof(*request), request);
2267 static int ipw_send_scan_abort(struct ipw_priv *priv)
2269 if (!priv) {
2270 IPW_ERROR("Invalid args\n");
2271 return -1;
2274 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2277 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2279 struct ipw_sensitivity_calib calib = {
2280 .beacon_rssi_raw = cpu_to_le16(sens),
2283 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2284 &calib);
2287 static int ipw_send_associate(struct ipw_priv *priv,
2288 struct ipw_associate *associate)
2290 struct ipw_associate tmp_associate;
2292 if (!priv || !associate) {
2293 IPW_ERROR("Invalid args\n");
2294 return -1;
2297 memcpy(&tmp_associate, associate, sizeof(*associate));
2298 tmp_associate.policy_support =
2299 cpu_to_le16(tmp_associate.policy_support);
2300 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2301 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2302 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2303 tmp_associate.listen_interval =
2304 cpu_to_le16(tmp_associate.listen_interval);
2305 tmp_associate.beacon_interval =
2306 cpu_to_le16(tmp_associate.beacon_interval);
2307 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2309 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2310 &tmp_associate);
2313 static int ipw_send_supported_rates(struct ipw_priv *priv,
2314 struct ipw_supported_rates *rates)
2316 if (!priv || !rates) {
2317 IPW_ERROR("Invalid args\n");
2318 return -1;
2321 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2322 rates);
2325 static int ipw_set_random_seed(struct ipw_priv *priv)
2327 u32 val;
2329 if (!priv) {
2330 IPW_ERROR("Invalid args\n");
2331 return -1;
2334 get_random_bytes(&val, sizeof(val));
2336 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2339 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2341 if (!priv) {
2342 IPW_ERROR("Invalid args\n");
2343 return -1;
2346 phy_off = cpu_to_le32(phy_off);
2347 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2348 &phy_off);
2351 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2353 if (!priv || !power) {
2354 IPW_ERROR("Invalid args\n");
2355 return -1;
2358 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2361 static int ipw_set_tx_power(struct ipw_priv *priv)
2363 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2364 struct ipw_tx_power tx_power;
2365 s8 max_power;
2366 int i;
2368 memset(&tx_power, 0, sizeof(tx_power));
2370 /* configure device for 'G' band */
2371 tx_power.ieee_mode = IPW_G_MODE;
2372 tx_power.num_channels = geo->bg_channels;
2373 for (i = 0; i < geo->bg_channels; i++) {
2374 max_power = geo->bg[i].max_power;
2375 tx_power.channels_tx_power[i].channel_number =
2376 geo->bg[i].channel;
2377 tx_power.channels_tx_power[i].tx_power = max_power ?
2378 min(max_power, priv->tx_power) : priv->tx_power;
2380 if (ipw_send_tx_power(priv, &tx_power))
2381 return -EIO;
2383 /* configure device to also handle 'B' band */
2384 tx_power.ieee_mode = IPW_B_MODE;
2385 if (ipw_send_tx_power(priv, &tx_power))
2386 return -EIO;
2388 /* configure device to also handle 'A' band */
2389 if (priv->ieee->abg_true) {
2390 tx_power.ieee_mode = IPW_A_MODE;
2391 tx_power.num_channels = geo->a_channels;
2392 for (i = 0; i < tx_power.num_channels; i++) {
2393 max_power = geo->a[i].max_power;
2394 tx_power.channels_tx_power[i].channel_number =
2395 geo->a[i].channel;
2396 tx_power.channels_tx_power[i].tx_power = max_power ?
2397 min(max_power, priv->tx_power) : priv->tx_power;
2399 if (ipw_send_tx_power(priv, &tx_power))
2400 return -EIO;
2402 return 0;
2405 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2407 struct ipw_rts_threshold rts_threshold = {
2408 .rts_threshold = cpu_to_le16(rts),
2411 if (!priv) {
2412 IPW_ERROR("Invalid args\n");
2413 return -1;
2416 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2417 sizeof(rts_threshold), &rts_threshold);
2420 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2422 struct ipw_frag_threshold frag_threshold = {
2423 .frag_threshold = cpu_to_le16(frag),
2426 if (!priv) {
2427 IPW_ERROR("Invalid args\n");
2428 return -1;
2431 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2432 sizeof(frag_threshold), &frag_threshold);
2435 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2437 u32 param;
2439 if (!priv) {
2440 IPW_ERROR("Invalid args\n");
2441 return -1;
2444 /* If on battery, set to 3, if AC set to CAM, else user
2445 * level */
2446 switch (mode) {
2447 case IPW_POWER_BATTERY:
2448 param = IPW_POWER_INDEX_3;
2449 break;
2450 case IPW_POWER_AC:
2451 param = IPW_POWER_MODE_CAM;
2452 break;
2453 default:
2454 param = mode;
2455 break;
2458 param = cpu_to_le32(param);
2459 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2460 &param);
2463 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2465 struct ipw_retry_limit retry_limit = {
2466 .short_retry_limit = slimit,
2467 .long_retry_limit = llimit
2470 if (!priv) {
2471 IPW_ERROR("Invalid args\n");
2472 return -1;
2475 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2476 &retry_limit);
2480 * The IPW device contains a Microwire compatible EEPROM that stores
2481 * various data like the MAC address. Usually the firmware has exclusive
2482 * access to the eeprom, but during device initialization (before the
2483 * device driver has sent the HostComplete command to the firmware) the
2484 * device driver has read access to the EEPROM by way of indirect addressing
2485 * through a couple of memory mapped registers.
2487 * The following is a simplified implementation for pulling data out of
2488 * the eeprom, along with some helper functions to find information in
2489 * the per device private data's copy of the eeprom.
2491 * NOTE: To better understand how these functions work (i.e. what is a chip
2492 * select and why do we have to keep driving the eeprom clock?), read
2493 * just about any data sheet for a Microwire compatible EEPROM.
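/*
 * Editor's illustration -- a hypothetical, self-contained sketch, not part
 * of the driver.  It models why the chip select and clock must be driven
 * for every single bit: a Microwire part shifts one bit in (on DI) and one
 * bit out (on DO) per SK clock pulse while CS is held high, which is what
 * the helpers below emulate through eeprom_write_reg().
 */
struct example_microwire_eeprom {
        u32 shift_in;   /* start bit, opcode and address clocked in from DI */
        u16 shift_out;  /* the addressed 16-bit word clocked out on DO */
};

static int example_microwire_clock(struct example_microwire_eeprom *e,
                                   int cs, int sk, int di)
{
        int dout;

        if (!cs || !sk)         /* without CS and a clock pulse nothing moves */
                return 0;

        e->shift_in = (e->shift_in << 1) | (di ? 1 : 0);
        dout = (e->shift_out & 0x8000) ? 1 : 0; /* DO presents the MSB first */
        e->shift_out <<= 1;

        return dout;
}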
2496 /* write a 32 bit value into the indirect accessor register */
2497 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2499 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2501 /* the eeprom requires some time to complete the operation */
2502 udelay(p->eeprom_delay);
2504 return;
2507 /* perform a chip select operation */
2508 static void eeprom_cs(struct ipw_priv *priv)
2510 eeprom_write_reg(priv, 0);
2511 eeprom_write_reg(priv, EEPROM_BIT_CS);
2512 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2513 eeprom_write_reg(priv, EEPROM_BIT_CS);
2516 /* release (deselect) the eeprom chip select */
2517 static void eeprom_disable_cs(struct ipw_priv *priv)
2519 eeprom_write_reg(priv, EEPROM_BIT_CS);
2520 eeprom_write_reg(priv, 0);
2521 eeprom_write_reg(priv, EEPROM_BIT_SK);
2524 /* push a single bit down to the eeprom */
2525 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2527 int d = (bit ? EEPROM_BIT_DI : 0);
2528 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2529 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2532 /* push an opcode followed by an address down to the eeprom */
2533 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2535 int i;
2537 eeprom_cs(priv);
2538 eeprom_write_bit(priv, 1);
2539 eeprom_write_bit(priv, op & 2);
2540 eeprom_write_bit(priv, op & 1);
2541 for (i = 7; i >= 0; i--) {
2542 eeprom_write_bit(priv, addr & (1 << i));
2546 /* pull 16 bits off the eeprom, one bit at a time */
2547 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2549 int i;
2550 u16 r = 0;
2552 /* Send READ Opcode */
2553 eeprom_op(priv, EEPROM_CMD_READ, addr);
2555 /* Send dummy bit */
2556 eeprom_write_reg(priv, EEPROM_BIT_CS);
2558 /* Read the 16-bit word off the eeprom one bit at a time */
2559 for (i = 0; i < 16; i++) {
2560 u32 data = 0;
2561 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2562 eeprom_write_reg(priv, EEPROM_BIT_CS);
2563 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2564 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2567 /* Send another dummy bit */
2568 eeprom_write_reg(priv, 0);
2569 eeprom_disable_cs(priv);
2571 return r;
2574 /* helper function for pulling the mac address out of the private */
2575 /* data's copy of the eeprom data */
2576 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2578 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2582 * Either the device driver (i.e. the host) or the firmware can
2583 * load eeprom data into the designated region in SRAM. If neither
2584 * happens then the FW will shut down with a fatal error.
2586 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2587 * region of shared SRAM needs to be non-zero.
2589 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2591 int i;
2592 u16 *eeprom = (u16 *) priv->eeprom;
2594 IPW_DEBUG_TRACE(">>\n");
2596 /* read entire contents of eeprom into private buffer */
2597 for (i = 0; i < 128; i++)
2598 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2601 If the data looks correct, then write it into the device's shared
2602 SRAM. Otherwise let the firmware know to perform the operation
2603 on its own.
2605 if (priv->eeprom[EEPROM_VERSION] != 0) {
2606 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2608 /* write the eeprom data to sram */
2609 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2610 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2612 /* Do not load eeprom data on fatal error or suspend */
2613 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2614 } else {
2615 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2617 /* Load eeprom data on fatal error or suspend */
2618 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2621 IPW_DEBUG_TRACE("<<\n");
2624 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2626 count >>= 2;
2627 if (!count)
2628 return;
2629 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2630 while (count--)
2631 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2634 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2636 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2637 CB_NUMBER_OF_ELEMENTS_SMALL *
2638 sizeof(struct command_block));
2641 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2642 { /* start dma engine but no transfers yet */
2644 IPW_DEBUG_FW(">> : \n");
2646 /* Start the dma */
2647 ipw_fw_dma_reset_command_blocks(priv);
2649 /* Write CB base address */
2650 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2652 IPW_DEBUG_FW("<< : \n");
2653 return 0;
2656 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2658 u32 control = 0;
2660 IPW_DEBUG_FW(">> :\n");
2662 /* set the Stop and Abort bit */
2663 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2664 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2665 priv->sram_desc.last_cb_index = 0;
2667 IPW_DEBUG_FW("<< \n");
2670 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2671 struct command_block *cb)
2673 u32 address =
2674 IPW_SHARED_SRAM_DMA_CONTROL +
2675 (sizeof(struct command_block) * index);
2676 IPW_DEBUG_FW(">> :\n");
2678 ipw_write_indirect(priv, address, (u8 *) cb,
2679 (int)sizeof(struct command_block));
2681 IPW_DEBUG_FW("<< :\n");
2682 return 0;
2686 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2688 u32 control = 0;
2689 u32 index = 0;
2691 IPW_DEBUG_FW(">> :\n");
2693 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2694 ipw_fw_dma_write_command_block(priv, index,
2695 &priv->sram_desc.cb_list[index]);
2697 /* Enable the DMA in the CSR register */
2698 ipw_clear_bit(priv, IPW_RESET_REG,
2699 IPW_RESET_REG_MASTER_DISABLED |
2700 IPW_RESET_REG_STOP_MASTER);
2702 /* Set the Start bit. */
2703 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2704 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2706 IPW_DEBUG_FW("<< :\n");
2707 return 0;
2710 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2712 u32 address;
2713 u32 register_value = 0;
2714 u32 cb_fields_address = 0;
2716 IPW_DEBUG_FW(">> :\n");
2717 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2718 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2720 /* Read the DMA Control register */
2721 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2722 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2724 /* Print the CB values */
2725 cb_fields_address = address;
2726 register_value = ipw_read_reg32(priv, cb_fields_address);
2727 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2729 cb_fields_address += sizeof(u32);
2730 register_value = ipw_read_reg32(priv, cb_fields_address);
2731 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2733 cb_fields_address += sizeof(u32);
2734 register_value = ipw_read_reg32(priv, cb_fields_address);
2735 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2736 register_value);
2738 cb_fields_address += sizeof(u32);
2739 register_value = ipw_read_reg32(priv, cb_fields_address);
2740 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2742 IPW_DEBUG_FW("<< :\n");
2745 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2747 u32 current_cb_address = 0;
2748 u32 current_cb_index = 0;
2750 IPW_DEBUG_FW(">> :\n");
2751 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2753 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2754 sizeof(struct command_block);
2756 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2757 current_cb_index, current_cb_address);
2759 IPW_DEBUG_FW("<< :\n");
2760 return current_cb_index;
2764 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2765 u32 src_address,
2766 u32 dest_address,
2767 u32 length,
2768 int interrupt_enabled, int is_last)
2771 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2772 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2773 CB_DEST_SIZE_LONG;
2774 struct command_block *cb;
2775 u32 last_cb_element = 0;
2777 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2778 src_address, dest_address, length);
2780 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2781 return -1;
2783 last_cb_element = priv->sram_desc.last_cb_index;
2784 cb = &priv->sram_desc.cb_list[last_cb_element];
2785 priv->sram_desc.last_cb_index++;
2787 /* Calculate the new CB control word */
2788 if (interrupt_enabled)
2789 control |= CB_INT_ENABLED;
2791 if (is_last)
2792 control |= CB_LAST_VALID;
2794 control |= length;
2796 /* Calculate the CB Element's checksum value */
2797 cb->status = control ^ src_address ^ dest_address;
2799 /* Copy the Source and Destination addresses */
2800 cb->dest_addr = dest_address;
2801 cb->source_addr = src_address;
2803 /* Copy the Control Word last */
2804 cb->control = control;
2806 return 0;
2809 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2810 u32 src_phys, u32 dest_address, u32 length)
2812 u32 bytes_left = length;
2813 u32 src_offset = 0;
2814 u32 dest_offset = 0;
2815 int status = 0;
2816 IPW_DEBUG_FW(">> \n");
2817 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2818 src_phys, dest_address, length);
2819 while (bytes_left > CB_MAX_LENGTH) {
2820 status = ipw_fw_dma_add_command_block(priv,
2821 src_phys + src_offset,
2822 dest_address +
2823 dest_offset,
2824 CB_MAX_LENGTH, 0, 0);
2825 if (status) {
2826 IPW_DEBUG_FW_INFO(": Failed\n");
2827 return -1;
2828 } else
2829 IPW_DEBUG_FW_INFO(": Added new cb\n");
2831 src_offset += CB_MAX_LENGTH;
2832 dest_offset += CB_MAX_LENGTH;
2833 bytes_left -= CB_MAX_LENGTH;
2836 /* add the buffer tail */
2837 if (bytes_left > 0) {
2838 status =
2839 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2840 dest_address + dest_offset,
2841 bytes_left, 0, 0);
2842 if (status) {
2843 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2844 return -1;
2845 } else
2846 IPW_DEBUG_FW_INFO
2847 (": Adding new cb - the buffer tail\n");
2850 IPW_DEBUG_FW("<< \n");
2851 return 0;
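/*
 * Editor's worked example (illustrative, hypothetical numbers): assuming a
 * CB_MAX_LENGTH of 0x2000 bytes, ipw_fw_dma_add_buffer() above splits a
 * 0x5000-byte firmware chunk into three command blocks -- 0x2000, 0x2000
 * and a 0x1000-byte "buffer tail".  The sketch below only counts the blocks
 * the while loop and the tail branch would create.
 */
static u32 example_count_command_blocks(u32 length, u32 max_cb_len)
{
        u32 full = length / max_cb_len;                 /* from the while loop */
        u32 tail = (length % max_cb_len) ? 1 : 0;       /* the buffer tail */

        return full + tail;                             /* 0x5000, 0x2000 -> 3 */
}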
2854 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2856 u32 current_index = 0, previous_index;
2857 u32 watchdog = 0;
2859 IPW_DEBUG_FW(">> : \n");
2861 current_index = ipw_fw_dma_command_block_index(priv);
2862 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2863 (int)priv->sram_desc.last_cb_index);
2865 while (current_index < priv->sram_desc.last_cb_index) {
2866 udelay(50);
2867 previous_index = current_index;
2868 current_index = ipw_fw_dma_command_block_index(priv);
2870 if (previous_index < current_index) {
2871 watchdog = 0;
2872 continue;
2874 if (++watchdog > 400) {
2875 IPW_DEBUG_FW_INFO("Timeout\n");
2876 ipw_fw_dma_dump_command_block(priv);
2877 ipw_fw_dma_abort(priv);
2878 return -1;
2882 ipw_fw_dma_abort(priv);
2884 /* Disable the DMA in the CSR register */
2885 ipw_set_bit(priv, IPW_RESET_REG,
2886 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2888 IPW_DEBUG_FW("<< dmaWaitSync \n");
2889 return 0;
2892 static void ipw_remove_current_network(struct ipw_priv *priv)
2894 struct list_head *element, *safe;
2895 struct ieee80211_network *network = NULL;
2896 unsigned long flags;
2898 spin_lock_irqsave(&priv->ieee->lock, flags);
2899 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2900 network = list_entry(element, struct ieee80211_network, list);
2901 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2902 list_del(element);
2903 list_add_tail(&network->list,
2904 &priv->ieee->network_free_list);
2907 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2911 * Check that card is still alive.
2912 * Reads debug register from domain0.
2913 * If card is present, pre-defined value should
2914 * be found there.
2916 * @param priv
2917 * @return 1 if card is present, 0 otherwise
2919 static inline int ipw_alive(struct ipw_priv *priv)
2921 return ipw_read32(priv, 0x90) == 0xd55555d5;
2924 /* timeout in msec, attempted in 10-msec quanta */
2925 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2926 int timeout)
2928 int i = 0;
2930 do {
2931 if ((ipw_read32(priv, addr) & mask) == mask)
2932 return i;
2933 mdelay(10);
2934 i += 10;
2935 } while (i < timeout);
2937 return -ETIME;
2940 /* These functions load the firmware and microcode needed for operation of
2941 * the ipw hardware. They assume the buffer has all the bits for the
2942 * image and that the caller is handling the memory allocation and clean up.
2945 static int ipw_stop_master(struct ipw_priv *priv)
2947 int rc;
2949 IPW_DEBUG_TRACE(">> \n");
2950 /* stop master. typical delay - 0 */
2951 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2953 /* timeout is in msec, polled in 10-msec quanta */
2954 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2955 IPW_RESET_REG_MASTER_DISABLED, 100);
2956 if (rc < 0) {
2957 IPW_ERROR("wait for stop master failed after 100ms\n");
2958 return -1;
2961 IPW_DEBUG_INFO("stop master %dms\n", rc);
2963 return rc;
2966 static void ipw_arc_release(struct ipw_priv *priv)
2968 IPW_DEBUG_TRACE(">> \n");
2969 mdelay(5);
2971 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2973 /* the exact timing is not documented, so add some delay to be safe */
2974 mdelay(5);
2977 struct fw_chunk {
2978 u32 address;
2979 u32 length;
2982 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2984 int rc = 0, i, addr;
2985 u8 cr = 0;
2986 u16 *image;
2988 image = (u16 *) data;
2990 IPW_DEBUG_TRACE(">> \n");
2992 rc = ipw_stop_master(priv);
2994 if (rc < 0)
2995 return rc;
2997 for (addr = IPW_SHARED_LOWER_BOUND;
2998 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2999 ipw_write32(priv, addr, 0);
3002 /* no ucode (yet) */
3003 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3004 /* destroy DMA queues */
3005 /* reset sequence */
3007 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3008 ipw_arc_release(priv);
3009 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3010 mdelay(1);
3012 /* reset PHY */
3013 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3014 mdelay(1);
3016 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3017 mdelay(1);
3019 /* enable ucode store */
3020 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3021 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3022 mdelay(1);
3024 /* write ucode */
3026 * @bug
3027 * Do NOT set indirect address register once and then
3028 * store data to indirect data register in the loop.
3029 * It seems very reasonable, but in this case DINO does not
3030 * accept the ucode. It is essential to set the address each time.
3032 /* load new ipw uCode */
3033 for (i = 0; i < len / 2; i++)
3034 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3035 cpu_to_le16(image[i]));
3037 /* enable DINO */
3038 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3039 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3041 /* this is where the igx / win driver deviates from the VAP driver. */
3043 /* wait for alive response */
3044 for (i = 0; i < 100; i++) {
3045 /* poll for incoming data */
3046 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3047 if (cr & DINO_RXFIFO_DATA)
3048 break;
3049 mdelay(1);
3052 if (cr & DINO_RXFIFO_DATA) {
3053 /* the alive command response size is NOT a multiple of 4 */
3054 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3056 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3057 response_buffer[i] =
3058 le32_to_cpu(ipw_read_reg32(priv,
3059 IPW_BASEBAND_RX_FIFO_READ));
3060 memcpy(&priv->dino_alive, response_buffer,
3061 sizeof(priv->dino_alive));
3062 if (priv->dino_alive.alive_command == 1
3063 && priv->dino_alive.ucode_valid == 1) {
3064 rc = 0;
3065 IPW_DEBUG_INFO
3066 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3067 "of %02d/%02d/%02d %02d:%02d\n",
3068 priv->dino_alive.software_revision,
3069 priv->dino_alive.software_revision,
3070 priv->dino_alive.device_identifier,
3071 priv->dino_alive.device_identifier,
3072 priv->dino_alive.time_stamp[0],
3073 priv->dino_alive.time_stamp[1],
3074 priv->dino_alive.time_stamp[2],
3075 priv->dino_alive.time_stamp[3],
3076 priv->dino_alive.time_stamp[4]);
3077 } else {
3078 IPW_DEBUG_INFO("Microcode is not alive\n");
3079 rc = -EINVAL;
3081 } else {
3082 IPW_DEBUG_INFO("No alive response from DINO\n");
3083 rc = -ETIME;
3086 /* disable DINO, otherwise for some reason the
3087 firmware has problems getting the alive response. */
3088 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3090 return rc;
3093 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3095 int rc = -1;
3096 int offset = 0;
3097 struct fw_chunk *chunk;
3098 dma_addr_t shared_phys;
3099 u8 *shared_virt;
3101 IPW_DEBUG_TRACE(">> : \n");
3102 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3104 if (!shared_virt)
3105 return -ENOMEM;
3107 memmove(shared_virt, data, len);
3109 /* Start the Dma */
3110 rc = ipw_fw_dma_enable(priv);
3112 if (priv->sram_desc.last_cb_index > 0) {
3113 /* the DMA is already ready; this would be a bug. */
3114 BUG();
3115 goto out;
3118 do {
3119 chunk = (struct fw_chunk *)(data + offset);
3120 offset += sizeof(struct fw_chunk);
3121 /* build DMA packet and queue up for sending */
3122 /* dma to chunk->address, the chunk->length bytes from data +
3123 * offset */
3124 /* Dma loading */
3125 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3126 le32_to_cpu(chunk->address),
3127 le32_to_cpu(chunk->length));
3128 if (rc) {
3129 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3130 goto out;
3133 offset += le32_to_cpu(chunk->length);
3134 } while (offset < len);
3136 /* Run the DMA and wait for the answer */
3137 rc = ipw_fw_dma_kick(priv);
3138 if (rc) {
3139 IPW_ERROR("dmaKick Failed\n");
3140 goto out;
3143 rc = ipw_fw_dma_wait(priv);
3144 if (rc) {
3145 IPW_ERROR("dmaWaitSync Failed\n");
3146 goto out;
3148 out:
3149 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3150 return rc;
3153 /* stop nic */
3154 static int ipw_stop_nic(struct ipw_priv *priv)
3156 int rc = 0;
3158 /* stop */
3159 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3161 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3162 IPW_RESET_REG_MASTER_DISABLED, 500);
3163 if (rc < 0) {
3164 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3165 return rc;
3168 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3170 return rc;
3173 static void ipw_start_nic(struct ipw_priv *priv)
3175 IPW_DEBUG_TRACE(">>\n");
3177 /* prvHwStartNic release ARC */
3178 ipw_clear_bit(priv, IPW_RESET_REG,
3179 IPW_RESET_REG_MASTER_DISABLED |
3180 IPW_RESET_REG_STOP_MASTER |
3181 CBD_RESET_REG_PRINCETON_RESET);
3183 /* enable power management */
3184 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3185 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3187 IPW_DEBUG_TRACE("<<\n");
3190 static int ipw_init_nic(struct ipw_priv *priv)
3192 int rc;
3194 IPW_DEBUG_TRACE(">>\n");
3195 /* reset */
3196 /*prvHwInitNic */
3197 /* set "initialization complete" bit to move adapter to D0 state */
3198 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3200 /* low-level PLL activation */
3201 ipw_write32(priv, IPW_READ_INT_REGISTER,
3202 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3204 /* wait for clock stabilization */
3205 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3206 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3207 if (rc < 0)
3208 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3210 /* assert SW reset */
3211 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3213 udelay(10);
3215 /* set "initialization complete" bit to move adapter to D0 state */
3216 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3218 IPW_DEBUG_TRACE("<<\n");
3219 return 0;
3222 /* Call this function from process context; it will sleep in request_firmware.
3223 * Probe is an OK place to call this from.
3225 static int ipw_reset_nic(struct ipw_priv *priv)
3227 int rc = 0;
3228 unsigned long flags;
3230 IPW_DEBUG_TRACE(">>\n");
3232 rc = ipw_init_nic(priv);
3234 spin_lock_irqsave(&priv->lock, flags);
3235 /* Clear the 'host command active' bit... */
3236 priv->status &= ~STATUS_HCMD_ACTIVE;
3237 wake_up_interruptible(&priv->wait_command_queue);
3238 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3239 wake_up_interruptible(&priv->wait_state);
3240 spin_unlock_irqrestore(&priv->lock, flags);
3242 IPW_DEBUG_TRACE("<<\n");
3243 return rc;
3247 struct ipw_fw {
3248 __le32 ver;
3249 __le32 boot_size;
3250 __le32 ucode_size;
3251 __le32 fw_size;
3252 u8 data[0];
3255 static int ipw_get_fw(struct ipw_priv *priv,
3256 const struct firmware **raw, const char *name)
3258 struct ipw_fw *fw;
3259 int rc;
3261 /* ask firmware_class module to get the boot firmware off disk */
3262 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3263 if (rc < 0) {
3264 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3265 return rc;
3268 if ((*raw)->size < sizeof(*fw)) {
3269 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3270 return -EINVAL;
3273 fw = (void *)(*raw)->data;
3275 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3276 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3277 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3278 name, (*raw)->size);
3279 return -EINVAL;
3282 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3283 name,
3284 le32_to_cpu(fw->ver) >> 16,
3285 le32_to_cpu(fw->ver) & 0xff,
3286 (*raw)->size - sizeof(*fw));
3287 return 0;
3290 #define IPW_RX_BUF_SIZE (3000)
3292 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3293 struct ipw_rx_queue *rxq)
3295 unsigned long flags;
3296 int i;
3298 spin_lock_irqsave(&rxq->lock, flags);
3300 INIT_LIST_HEAD(&rxq->rx_free);
3301 INIT_LIST_HEAD(&rxq->rx_used);
3303 /* Fill the rx_used queue with _all_ of the Rx buffers */
3304 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3305 /* In the reset function, these buffers may have been allocated
3306 * to an SKB, so we need to unmap and free potential storage */
3307 if (rxq->pool[i].skb != NULL) {
3308 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3309 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3310 dev_kfree_skb(rxq->pool[i].skb);
3311 rxq->pool[i].skb = NULL;
3313 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3316 /* Set us so that we have processed and used all buffers, but have
3317 * not restocked the Rx queue with fresh buffers */
3318 rxq->read = rxq->write = 0;
3319 rxq->processed = RX_QUEUE_SIZE - 1;
3320 rxq->free_count = 0;
3321 spin_unlock_irqrestore(&rxq->lock, flags);
3324 #ifdef CONFIG_PM
3325 static int fw_loaded = 0;
3326 static const struct firmware *raw = NULL;
3328 static void free_firmware(void)
3330 if (fw_loaded) {
3331 release_firmware(raw);
3332 raw = NULL;
3333 fw_loaded = 0;
3336 #else
3337 #define free_firmware() do {} while (0)
3338 #endif
3340 static int ipw_load(struct ipw_priv *priv)
3342 #ifndef CONFIG_PM
3343 const struct firmware *raw = NULL;
3344 #endif
3345 struct ipw_fw *fw;
3346 u8 *boot_img, *ucode_img, *fw_img;
3347 u8 *name = NULL;
3348 int rc = 0, retries = 3;
3350 switch (priv->ieee->iw_mode) {
3351 case IW_MODE_ADHOC:
3352 name = "ipw2200-ibss.fw";
3353 break;
3354 #ifdef CONFIG_IPW2200_MONITOR
3355 case IW_MODE_MONITOR:
3356 name = "ipw2200-sniffer.fw";
3357 break;
3358 #endif
3359 case IW_MODE_INFRA:
3360 name = "ipw2200-bss.fw";
3361 break;
3364 if (!name) {
3365 rc = -EINVAL;
3366 goto error;
3369 #ifdef CONFIG_PM
3370 if (!fw_loaded) {
3371 #endif
3372 rc = ipw_get_fw(priv, &raw, name);
3373 if (rc < 0)
3374 goto error;
3375 #ifdef CONFIG_PM
3377 #endif
3379 fw = (void *)raw->data;
3380 boot_img = &fw->data[0];
3381 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3382 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3383 le32_to_cpu(fw->ucode_size)];
3385 if (rc < 0)
3386 goto error;
3388 if (!priv->rxq)
3389 priv->rxq = ipw_rx_queue_alloc(priv);
3390 else
3391 ipw_rx_queue_reset(priv, priv->rxq);
3392 if (!priv->rxq) {
3393 IPW_ERROR("Unable to initialize Rx queue\n");
3394 goto error;
3397 retry:
3398 /* Ensure interrupts are disabled */
3399 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3400 priv->status &= ~STATUS_INT_ENABLED;
3402 /* ack pending interrupts */
3403 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3405 ipw_stop_nic(priv);
3407 rc = ipw_reset_nic(priv);
3408 if (rc < 0) {
3409 IPW_ERROR("Unable to reset NIC\n");
3410 goto error;
3413 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3414 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3416 /* DMA the initial boot firmware into the device */
3417 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3418 if (rc < 0) {
3419 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3420 goto error;
3423 /* kick start the device */
3424 ipw_start_nic(priv);
3426 /* wait for the device to finish its initial startup sequence */
3427 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3428 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3429 if (rc < 0) {
3430 IPW_ERROR("device failed to boot initial fw image\n");
3431 goto error;
3433 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3435 /* ack fw init done interrupt */
3436 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3438 /* DMA the ucode into the device */
3439 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3440 if (rc < 0) {
3441 IPW_ERROR("Unable to load ucode: %d\n", rc);
3442 goto error;
3445 /* stop nic */
3446 ipw_stop_nic(priv);
3448 /* DMA bss firmware into the device */
3449 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3450 if (rc < 0) {
3451 IPW_ERROR("Unable to load firmware: %d\n", rc);
3452 goto error;
3454 #ifdef CONFIG_PM
3455 fw_loaded = 1;
3456 #endif
3458 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3460 rc = ipw_queue_reset(priv);
3461 if (rc < 0) {
3462 IPW_ERROR("Unable to initialize queues\n");
3463 goto error;
3466 /* Ensure interrupts are disabled */
3467 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3468 /* ack pending interrupts */
3469 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3471 /* kick start the device */
3472 ipw_start_nic(priv);
3474 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3475 if (retries > 0) {
3476 IPW_WARNING("Parity error. Retrying init.\n");
3477 retries--;
3478 goto retry;
3481 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3482 rc = -EIO;
3483 goto error;
3486 /* wait for the device */
3487 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3488 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3489 if (rc < 0) {
3490 IPW_ERROR("device failed to start within 500ms\n");
3491 goto error;
3493 IPW_DEBUG_INFO("device response after %dms\n", rc);
3495 /* ack fw init done interrupt */
3496 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3498 /* read eeprom data and initialize the eeprom region of sram */
3499 priv->eeprom_delay = 1;
3500 ipw_eeprom_init_sram(priv);
3502 /* enable interrupts */
3503 ipw_enable_interrupts(priv);
3505 /* Ensure our queue has valid packets */
3506 ipw_rx_queue_replenish(priv);
3508 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3510 /* ack pending interrupts */
3511 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3513 #ifndef CONFIG_PM
3514 release_firmware(raw);
3515 #endif
3516 return 0;
3518 error:
3519 if (priv->rxq) {
3520 ipw_rx_queue_free(priv, priv->rxq);
3521 priv->rxq = NULL;
3523 ipw_tx_queue_free(priv);
3524 if (raw)
3525 release_firmware(raw);
3526 #ifdef CONFIG_PM
3527 fw_loaded = 0;
3528 raw = NULL;
3529 #endif
3531 return rc;
3535 * DMA services
3537 * Theory of operation
3539 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3540 * Two empty entries are always kept in the buffer to protect from overflow.
3542 * For the Tx queues there are low-mark and high-mark limits. If, after queuing
3543 * a packet for Tx, the free space drops below the low mark, the Tx queue is
3544 * stopped. When reclaiming packets (on the 'tx done' IRQ), the Tx queue is
3545 * resumed once the free space rises above the high mark.
3547 * The IPW operates with six queues, one receive queue in the device's
3548 * sram, one transmit queue for sending commands to the device firmware,
3549 * and four transmit queues for data.
3551 * The four transmit queues allow for performing quality of service (qos)
3552 * transmissions as per the 802.11 protocol. Currently Linux does not
3553 * provide a mechanism to the user for utilizing prioritized queues, so
3554 * we only utilize the first data transmit queue (queue1).
3558 * Driver allocates buffers of this size for Rx
3561 static inline int ipw_queue_space(const struct clx2_queue *q)
3563 int s = q->last_used - q->first_empty;
3564 if (s <= 0)
3565 s += q->n_bd;
3566 s -= 2; /* keep some reserve to not confuse empty and full situations */
3567 if (s < 0)
3568 s = 0;
3569 return s;
3572 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3574 return (++index == n_bd) ? 0 : index;
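/*
 * Editor's worked example (illustrative, not part of the driver): the ring
 * arithmetic above on a hypothetical 8-entry queue.  first_empty is where
 * the next TFD will be written and last_used is the next one to reclaim;
 * two slots are always held back so a completely full ring cannot be
 * confused with an empty one (both would have first_empty == last_used).
 */
static int example_ring_space(int first_empty, int last_used, int n_bd)
{
        int s = last_used - first_empty;        /* as in ipw_queue_space() */

        if (s <= 0)
                s += n_bd;
        s -= 2;                                 /* the two reserved slots */
        return s < 0 ? 0 : s;
        /* e.g. n_bd = 8: an empty ring (0, 0) reports 6 usable slots,
         * a nearly full ring (first_empty = 7, last_used = 1) reports 0 */
}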
3578 * Initialize common DMA queue structure
3580 * @param q queue to init
3581 * @param count Number of BD's to allocate. Should be power of 2
3582 * @param read_register Address for 'read' register
3583 * (not offset within BAR, full address)
3584 * @param write_register Address for 'write' register
3585 * (not offset within BAR, full address)
3586 * @param base_register Address for 'base' register
3587 * (not offset within BAR, full address)
3588 * @param size Address for 'size' register
3589 * (not offset within BAR, full address)
3591 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3592 int count, u32 read, u32 write, u32 base, u32 size)
3594 q->n_bd = count;
3596 q->low_mark = q->n_bd / 4;
3597 if (q->low_mark < 4)
3598 q->low_mark = 4;
3600 q->high_mark = q->n_bd / 8;
3601 if (q->high_mark < 2)
3602 q->high_mark = 2;
3604 q->first_empty = q->last_used = 0;
3605 q->reg_r = read;
3606 q->reg_w = write;
3608 ipw_write32(priv, base, q->dma_addr);
3609 ipw_write32(priv, size, count);
3610 ipw_write32(priv, read, 0);
3611 ipw_write32(priv, write, 0);
3613 _ipw_read32(priv, 0x90);
3616 static int ipw_queue_tx_init(struct ipw_priv *priv,
3617 struct clx2_tx_queue *q,
3618 int count, u32 read, u32 write, u32 base, u32 size)
3620 struct pci_dev *dev = priv->pci_dev;
3622 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3623 if (!q->txb) {
3624 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3625 return -ENOMEM;
3628 q->bd =
3629 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3630 if (!q->bd) {
3631 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3632 sizeof(q->bd[0]) * count);
3633 kfree(q->txb);
3634 q->txb = NULL;
3635 return -ENOMEM;
3638 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3639 return 0;
3643 * Free one TFD, the one at index [txq->q.last_used].
3644 * Do NOT advance any indexes
3646 * @param priv
3647 * @param txq
3649 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3650 struct clx2_tx_queue *txq)
3652 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3653 struct pci_dev *dev = priv->pci_dev;
3654 int i;
3656 /* classify bd */
3657 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3658 /* nothing to cleanup after for host commands */
3659 return;
3661 /* sanity check */
3662 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3663 IPW_ERROR("Too many chunks: %i\n",
3664 le32_to_cpu(bd->u.data.num_chunks));
3665 /** @todo issue fatal error, it is quite a serious situation */
3666 return;
3669 /* unmap chunks if any */
3670 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3671 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3672 le16_to_cpu(bd->u.data.chunk_len[i]),
3673 PCI_DMA_TODEVICE);
3674 if (txq->txb[txq->q.last_used]) {
3675 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3676 txq->txb[txq->q.last_used] = NULL;
3682 * Deallocate DMA queue.
3684 * Empty queue by removing and destroying all BD's.
3685 * Free all buffers.
3687 * @param priv
3688 * @param txq
3690 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3692 struct clx2_queue *q = &txq->q;
3693 struct pci_dev *dev = priv->pci_dev;
3695 if (q->n_bd == 0)
3696 return;
3698 /* first, empty all BD's */
3699 for (; q->first_empty != q->last_used;
3700 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3701 ipw_queue_tx_free_tfd(priv, txq);
3704 /* free buffers belonging to queue itself */
3705 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3706 q->dma_addr);
3707 kfree(txq->txb);
3709 /* 0 fill whole structure */
3710 memset(txq, 0, sizeof(*txq));
3714 * Destroy all DMA queues and structures
3716 * @param priv
3718 static void ipw_tx_queue_free(struct ipw_priv *priv)
3720 /* Tx CMD queue */
3721 ipw_queue_tx_free(priv, &priv->txq_cmd);
3723 /* Tx queues */
3724 ipw_queue_tx_free(priv, &priv->txq[0]);
3725 ipw_queue_tx_free(priv, &priv->txq[1]);
3726 ipw_queue_tx_free(priv, &priv->txq[2]);
3727 ipw_queue_tx_free(priv, &priv->txq[3]);
3730 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3732 /* First 3 bytes are manufacturer */
3733 bssid[0] = priv->mac_addr[0];
3734 bssid[1] = priv->mac_addr[1];
3735 bssid[2] = priv->mac_addr[2];
3737 /* Last bytes are random */
3738 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3740 bssid[0] &= 0xfe; /* clear multicast bit */
3741 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
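/*
 * Editor's illustration (hypothetical helper, not part of the driver): the
 * two bit operations above turn any manufacturer prefix into a locally
 * administered, unicast one, which is what a self-generated IBSS BSSID
 * needs.  Only the first octet changes, e.g. 0x00 -> 0x02, 0x08 -> 0x0a.
 */
static u8 example_local_unicast_octet(u8 first_octet)
{
        first_octet &= 0xfe;    /* clear the group/multicast bit */
        first_octet |= 0x02;    /* set the locally administered bit */
        return first_octet;
}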
3744 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3746 struct ipw_station_entry entry;
3747 int i;
3749 for (i = 0; i < priv->num_stations; i++) {
3750 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3751 /* Another node is active in network */
3752 priv->missed_adhoc_beacons = 0;
3753 if (!(priv->config & CFG_STATIC_CHANNEL))
3754 /* when other nodes drop out, we drop out */
3755 priv->config &= ~CFG_ADHOC_PERSIST;
3757 return i;
3761 if (i == MAX_STATIONS)
3762 return IPW_INVALID_STATION;
3764 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3766 entry.reserved = 0;
3767 entry.support_mode = 0;
3768 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3769 memcpy(priv->stations[i], bssid, ETH_ALEN);
3770 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3771 &entry, sizeof(entry));
3772 priv->num_stations++;
3774 return i;
3777 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3779 int i;
3781 for (i = 0; i < priv->num_stations; i++)
3782 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3783 return i;
3785 return IPW_INVALID_STATION;
3788 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3790 int err;
3792 if (priv->status & STATUS_ASSOCIATING) {
3793 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3794 queue_work(priv->workqueue, &priv->disassociate);
3795 return;
3798 if (!(priv->status & STATUS_ASSOCIATED)) {
3799 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3800 return;
3803 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3804 "on channel %d.\n",
3805 MAC_ARG(priv->assoc_request.bssid),
3806 priv->assoc_request.channel);
3808 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3809 priv->status |= STATUS_DISASSOCIATING;
3811 if (quiet)
3812 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3813 else
3814 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3816 err = ipw_send_associate(priv, &priv->assoc_request);
3817 if (err) {
3818 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3819 "failed.\n");
3820 return;
3825 static int ipw_disassociate(void *data)
3827 struct ipw_priv *priv = data;
3828 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3829 return 0;
3830 ipw_send_disassociate(data, 0);
3831 return 1;
3834 static void ipw_bg_disassociate(void *data)
3836 struct ipw_priv *priv = data;
3837 mutex_lock(&priv->mutex);
3838 ipw_disassociate(data);
3839 mutex_unlock(&priv->mutex);
3842 static void ipw_system_config(void *data)
3844 struct ipw_priv *priv = data;
3846 #ifdef CONFIG_IPW2200_PROMISCUOUS
3847 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3848 priv->sys_config.accept_all_data_frames = 1;
3849 priv->sys_config.accept_non_directed_frames = 1;
3850 priv->sys_config.accept_all_mgmt_bcpr = 1;
3851 priv->sys_config.accept_all_mgmt_frames = 1;
3853 #endif
3855 ipw_send_system_config(priv);
3858 struct ipw_status_code {
3859 u16 status;
3860 const char *reason;
3863 static const struct ipw_status_code ipw_status_codes[] = {
3864 {0x00, "Successful"},
3865 {0x01, "Unspecified failure"},
3866 {0x0A, "Cannot support all requested capabilities in the "
3867 "Capability information field"},
3868 {0x0B, "Reassociation denied due to inability to confirm that "
3869 "association exists"},
3870 {0x0C, "Association denied due to reason outside the scope of this "
3871 "standard"},
3872 {0x0D,
3873 "Responding station does not support the specified authentication "
3874 "algorithm"},
3875 {0x0E,
3876 "Received an Authentication frame with authentication sequence "
3877 "transaction sequence number out of expected sequence"},
3878 {0x0F, "Authentication rejected because of challenge failure"},
3879 {0x10, "Authentication rejected due to timeout waiting for next "
3880 "frame in sequence"},
3881 {0x11, "Association denied because AP is unable to handle additional "
3882 "associated stations"},
3883 {0x12,
3884 "Association denied due to requesting station not supporting all "
3885 "of the datarates in the BSSBasicServiceSet Parameter"},
3886 {0x13,
3887 "Association denied due to requesting station not supporting "
3888 "short preamble operation"},
3889 {0x14,
3890 "Association denied due to requesting station not supporting "
3891 "PBCC encoding"},
3892 {0x15,
3893 "Association denied due to requesting station not supporting "
3894 "channel agility"},
3895 {0x19,
3896 "Association denied due to requesting station not supporting "
3897 "short slot operation"},
3898 {0x1A,
3899 "Association denied due to requesting station not supporting "
3900 "DSSS-OFDM operation"},
3901 {0x28, "Invalid Information Element"},
3902 {0x29, "Group Cipher is not valid"},
3903 {0x2A, "Pairwise Cipher is not valid"},
3904 {0x2B, "AKMP is not valid"},
3905 {0x2C, "Unsupported RSN IE version"},
3906 {0x2D, "Invalid RSN IE Capabilities"},
3907 {0x2E, "Cipher suite is rejected per security policy"},
3910 static const char *ipw_get_status_code(u16 status)
3912 int i;
3913 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3914 if (ipw_status_codes[i].status == (status & 0xff))
3915 return ipw_status_codes[i].reason;
3916 return "Unknown status value.";
3919 static inline void average_init(struct average *avg)
3921 memset(avg, 0, sizeof(*avg));
3924 #define DEPTH_RSSI 8
3925 #define DEPTH_NOISE 16
3926 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3928 return ((depth-1)*prev_avg + val)/depth;
3931 static void average_add(struct average *avg, s16 val)
3933 avg->sum -= avg->entries[avg->pos];
3934 avg->sum += val;
3935 avg->entries[avg->pos++] = val;
3936 if (unlikely(avg->pos == AVG_ENTRIES)) {
3937 avg->init = 1;
3938 avg->pos = 0;
3942 static s16 average_value(struct average *avg)
3944 if (!unlikely(avg->init)) {
3945 if (avg->pos)
3946 return avg->sum / avg->pos;
3947 return 0;
3950 return avg->sum / AVG_ENTRIES;
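/*
 * Editor's worked example (illustrative): exponential_average() above keeps
 * a running mean with weight 1/depth on each new sample.  With DEPTH_RSSI
 * (8), a previous average of -60 and a new reading of -52:
 *
 *     (7 * -60 + -52) / 8 = -472 / 8 = -59
 *
 * so one outlier only moves the average by about an eighth of the
 * difference, while the sliding-window helpers (average_add/average_value)
 * average the last AVG_ENTRIES raw samples instead.
 */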
3953 static void ipw_reset_stats(struct ipw_priv *priv)
3955 u32 len = sizeof(u32);
3957 priv->quality = 0;
3959 average_init(&priv->average_missed_beacons);
3960 priv->exp_avg_rssi = -60;
3961 priv->exp_avg_noise = -85 + 0x100;
3963 priv->last_rate = 0;
3964 priv->last_missed_beacons = 0;
3965 priv->last_rx_packets = 0;
3966 priv->last_tx_packets = 0;
3967 priv->last_tx_failures = 0;
3969 /* Firmware managed, reset only when NIC is restarted, so we have to
3970 * normalize on the current value */
3971 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3972 &priv->last_rx_err, &len);
3973 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3974 &priv->last_tx_failures, &len);
3976 /* Driver managed, reset with each association */
3977 priv->missed_adhoc_beacons = 0;
3978 priv->missed_beacons = 0;
3979 priv->tx_packets = 0;
3980 priv->rx_packets = 0;
3984 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3986 u32 i = 0x80000000;
3987 u32 mask = priv->rates_mask;
3988 /* If currently associated in B mode, restrict the maximum
3989 * rate match to B rates */
3990 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3991 mask &= IEEE80211_CCK_RATES_MASK;
3993 /* TODO: Verify that the rate is supported by the current rates
3994 * list. */
3996 while (i && !(mask & i))
3997 i >>= 1;
3998 switch (i) {
3999 case IEEE80211_CCK_RATE_1MB_MASK:
4000 return 1000000;
4001 case IEEE80211_CCK_RATE_2MB_MASK:
4002 return 2000000;
4003 case IEEE80211_CCK_RATE_5MB_MASK:
4004 return 5500000;
4005 case IEEE80211_OFDM_RATE_6MB_MASK:
4006 return 6000000;
4007 case IEEE80211_OFDM_RATE_9MB_MASK:
4008 return 9000000;
4009 case IEEE80211_CCK_RATE_11MB_MASK:
4010 return 11000000;
4011 case IEEE80211_OFDM_RATE_12MB_MASK:
4012 return 12000000;
4013 case IEEE80211_OFDM_RATE_18MB_MASK:
4014 return 18000000;
4015 case IEEE80211_OFDM_RATE_24MB_MASK:
4016 return 24000000;
4017 case IEEE80211_OFDM_RATE_36MB_MASK:
4018 return 36000000;
4019 case IEEE80211_OFDM_RATE_48MB_MASK:
4020 return 48000000;
4021 case IEEE80211_OFDM_RATE_54MB_MASK:
4022 return 54000000;
4025 if (priv->ieee->mode == IEEE_B)
4026 return 11000000;
4027 else
4028 return 54000000;
4031 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4033 u32 rate, len = sizeof(rate);
4034 int err;
4036 if (!(priv->status & STATUS_ASSOCIATED))
4037 return 0;
4039 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4040 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4041 &len);
4042 if (err) {
4043 IPW_DEBUG_INFO("failed querying ordinals.\n");
4044 return 0;
4046 } else
4047 return ipw_get_max_rate(priv);
4049 switch (rate) {
4050 case IPW_TX_RATE_1MB:
4051 return 1000000;
4052 case IPW_TX_RATE_2MB:
4053 return 2000000;
4054 case IPW_TX_RATE_5MB:
4055 return 5500000;
4056 case IPW_TX_RATE_6MB:
4057 return 6000000;
4058 case IPW_TX_RATE_9MB:
4059 return 9000000;
4060 case IPW_TX_RATE_11MB:
4061 return 11000000;
4062 case IPW_TX_RATE_12MB:
4063 return 12000000;
4064 case IPW_TX_RATE_18MB:
4065 return 18000000;
4066 case IPW_TX_RATE_24MB:
4067 return 24000000;
4068 case IPW_TX_RATE_36MB:
4069 return 36000000;
4070 case IPW_TX_RATE_48MB:
4071 return 48000000;
4072 case IPW_TX_RATE_54MB:
4073 return 54000000;
4076 return 0;
4079 #define IPW_STATS_INTERVAL (2 * HZ)
4080 static void ipw_gather_stats(struct ipw_priv *priv)
4082 u32 rx_err, rx_err_delta, rx_packets_delta;
4083 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4084 u32 missed_beacons_percent, missed_beacons_delta;
4085 u32 quality = 0;
4086 u32 len = sizeof(u32);
4087 s16 rssi;
4088 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4089 rate_quality;
4090 u32 max_rate;
4092 if (!(priv->status & STATUS_ASSOCIATED)) {
4093 priv->quality = 0;
4094 return;
4097 /* Update the statistics */
4098 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4099 &priv->missed_beacons, &len);
4100 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4101 priv->last_missed_beacons = priv->missed_beacons;
4102 if (priv->assoc_request.beacon_interval) {
4103 missed_beacons_percent = missed_beacons_delta *
4104 (HZ * priv->assoc_request.beacon_interval) /
4105 (IPW_STATS_INTERVAL * 10);
4106 } else {
4107 missed_beacons_percent = 0;
4109 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4111 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4112 rx_err_delta = rx_err - priv->last_rx_err;
4113 priv->last_rx_err = rx_err;
4115 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4116 tx_failures_delta = tx_failures - priv->last_tx_failures;
4117 priv->last_tx_failures = tx_failures;
4119 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4120 priv->last_rx_packets = priv->rx_packets;
4122 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4123 priv->last_tx_packets = priv->tx_packets;
4125 /* Calculate quality based on the following:
4127 * Missed beacon: 100% = 0, 0% = 70% missed
4128 * Rate: 60% = 1Mbs, 100% = Max
4129 * Rx and Tx errors represent a straight % of total Rx/Tx
4130 * RSSI: 100% = > -50, 0% = < -80
4131 * Rx errors: 100% = 0, 0% = 50% missed
4133 * The lowest computed quality is used.
4136 #define BEACON_THRESHOLD 5
4137 beacon_quality = 100 - missed_beacons_percent;
4138 if (beacon_quality < BEACON_THRESHOLD)
4139 beacon_quality = 0;
4140 else
4141 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4142 (100 - BEACON_THRESHOLD);
4143 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4144 beacon_quality, missed_beacons_percent);
4146 priv->last_rate = ipw_get_current_rate(priv);
4147 max_rate = ipw_get_max_rate(priv);
4148 rate_quality = priv->last_rate * 40 / max_rate + 60;
4149 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4150 rate_quality, priv->last_rate / 1000000);
4152 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4153 rx_quality = 100 - (rx_err_delta * 100) /
4154 (rx_packets_delta + rx_err_delta);
4155 else
4156 rx_quality = 100;
4157 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4158 rx_quality, rx_err_delta, rx_packets_delta);
4160 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4161 tx_quality = 100 - (tx_failures_delta * 100) /
4162 (tx_packets_delta + tx_failures_delta);
4163 else
4164 tx_quality = 100;
4165 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4166 tx_quality, tx_failures_delta, tx_packets_delta);
4168 rssi = priv->exp_avg_rssi;
4169 signal_quality =
4170 (100 *
4171 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4172 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4173 (priv->ieee->perfect_rssi - rssi) *
4174 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4175 62 * (priv->ieee->perfect_rssi - rssi))) /
4176 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4177 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4178 if (signal_quality > 100)
4179 signal_quality = 100;
4180 else if (signal_quality < 1)
4181 signal_quality = 0;
4183 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4184 signal_quality, rssi);
4186 quality = min(beacon_quality,
4187 min(rate_quality,
4188 min(tx_quality, min(rx_quality, signal_quality))));
4189 if (quality == beacon_quality)
4190 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4191 quality);
4192 if (quality == rate_quality)
4193 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4194 quality);
4195 if (quality == tx_quality)
4196 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4197 quality);
4198 if (quality == rx_quality)
4199 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4200 quality);
4201 if (quality == signal_quality)
4202 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4203 quality);
4205 priv->quality = quality;
4207 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4208 IPW_STATS_INTERVAL);
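/* The calculation above reduces five independent percentages (beacon, rate,
 * Tx, Rx and signal quality) to one figure by taking their minimum.  As a
 * minimal standalone sketch of that combination -- illustrative only, the
 * helper name link_quality_sketch is made up and is not driver API: */
static inline u32 link_quality_sketch(u32 beacon_q, u32 rate_q, u32 tx_q,
				      u32 rx_q, u32 signal_q)
{
	/* Each input is already normalized to 0..100; the reported link
	 * quality is clamped to the worst of the five metrics. */
	return min(beacon_q,
		   min(rate_q, min(tx_q, min(rx_q, signal_q))));
}
/* Worked example with the constants used above: 3% missed beacons gives
 * beacon_quality = (97 - 5) * 100 / 95 = 96, and a 24 Mbps rate against a
 * 54 Mbps maximum gives rate_quality = 24 * 40 / 54 + 60 = 77, so with
 * clean Tx/Rx and a strong signal the reported quality clamps to 77%. */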
4211 static void ipw_bg_gather_stats(void *data)
4213 struct ipw_priv *priv = data;
4214 mutex_lock(&priv->mutex);
4215 ipw_gather_stats(data);
4216 mutex_unlock(&priv->mutex);
4219 /* Missed beacon behavior:
4220 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4221 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4222 * Above disassociate threshold, give up and stop scanning.
4223 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4224 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4225 int missed_count)
4227 priv->notif_missed_beacons = missed_count;
4229 if (missed_count > priv->disassociate_threshold &&
4230 priv->status & STATUS_ASSOCIATED) {
4231 /* If associated and we've hit the missed
4232 * beacon threshold, disassociate, turn
4233 * off roaming, and abort any active scans */
4234 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4235 IPW_DL_STATE | IPW_DL_ASSOC,
4236 "Missed beacon: %d - disassociate\n", missed_count);
4237 priv->status &= ~STATUS_ROAMING;
4238 if (priv->status & STATUS_SCANNING) {
4239 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4240 IPW_DL_STATE,
4241 "Aborting scan with missed beacon.\n");
4242 queue_work(priv->workqueue, &priv->abort_scan);
4245 queue_work(priv->workqueue, &priv->disassociate);
4246 return;
4249 if (priv->status & STATUS_ROAMING) {
4250 /* If we are currently roaming, then just
4251 * print a debug statement... */
4252 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4253 "Missed beacon: %d - roam in progress\n",
4254 missed_count);
4255 return;
4258 if (roaming &&
4259 (missed_count > priv->roaming_threshold &&
4260 missed_count <= priv->disassociate_threshold)) {
4261 /* If we are not already roaming, set the ROAM
4262 * bit in the status and kick off a scan.
4263 * This can happen several times before we reach
4264 * disassociate_threshold. */
4265 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4266 "Missed beacon: %d - initiate "
4267 "roaming\n", missed_count);
4268 if (!(priv->status & STATUS_ROAMING)) {
4269 priv->status |= STATUS_ROAMING;
4270 if (!(priv->status & STATUS_SCANNING))
4271 queue_work(priv->workqueue,
4272 &priv->request_scan);
4274 return;
4277 if (priv->status & STATUS_SCANNING) {
4278 /* Stop scan to keep fw from getting
4279 * stuck (only if we aren't roaming --
4280 * otherwise we'll never scan more than 2 or 3
4281 * channels..) */
4282 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4283 "Aborting scan with missed beacon.\n");
4284 queue_work(priv->workqueue, &priv->abort_scan);
4287 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
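/* The threshold policy described above can be restated as a small decision
 * helper.  This is an illustrative sketch only -- the enum and function
 * below are made up for clarity and are not part of the driver: */
enum missed_beacon_action {
	MISSED_BEACON_WAIT,		/* below roaming_threshold */
	MISSED_BEACON_ROAM,		/* between the two thresholds */
	MISSED_BEACON_DISASSOCIATE,	/* above disassociate_threshold */
};

static inline enum missed_beacon_action
missed_beacon_policy(int missed, int roam_thresh, int disassoc_thresh)
{
	if (missed > disassoc_thresh)
		return MISSED_BEACON_DISASSOCIATE;
	/* When disassoc_thresh <= roam_thresh this range is empty, which is
	 * exactly how roaming ends up disabled in the rule above. */
	if (missed > roam_thresh)
		return MISSED_BEACON_ROAM;
	return MISSED_BEACON_WAIT;
}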
4291 * Handle host notification packet.
4292 * Called from interrupt routine
4294 static void ipw_rx_notification(struct ipw_priv *priv,
4295 struct ipw_rx_notification *notif)
4297 notif->size = le16_to_cpu(notif->size);
4299 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4301 switch (notif->subtype) {
4302 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4303 struct notif_association *assoc = &notif->u.assoc;
4305 switch (assoc->state) {
4306 case CMAS_ASSOCIATED:{
4307 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4308 IPW_DL_ASSOC,
4309 "associated: '%s' " MAC_FMT
4310 " \n",
4311 escape_essid(priv->essid,
4312 priv->essid_len),
4313 MAC_ARG(priv->bssid));
4315 switch (priv->ieee->iw_mode) {
4316 case IW_MODE_INFRA:
4317 memcpy(priv->ieee->bssid,
4318 priv->bssid, ETH_ALEN);
4319 break;
4321 case IW_MODE_ADHOC:
4322 memcpy(priv->ieee->bssid,
4323 priv->bssid, ETH_ALEN);
4325 /* clear out the station table */
4326 priv->num_stations = 0;
4328 IPW_DEBUG_ASSOC
4329 ("queueing adhoc check\n");
4330 queue_delayed_work(priv->
4331 workqueue,
4332 &priv->
4333 adhoc_check,
4334 priv->
4335 assoc_request.
4336 beacon_interval);
4337 break;
4340 priv->status &= ~STATUS_ASSOCIATING;
4341 priv->status |= STATUS_ASSOCIATED;
4342 queue_work(priv->workqueue,
4343 &priv->system_config);
4345 #ifdef CONFIG_IPW2200_QOS
4346 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4347 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4348 if ((priv->status & STATUS_AUTH) &&
4349 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4350 == IEEE80211_STYPE_ASSOC_RESP)) {
4351 if ((sizeof
4352 (struct
4353 ieee80211_assoc_response)
4354 <= notif->size)
4355 && (notif->size <= 2314)) {
4356 struct
4357 ieee80211_rx_stats
4358 stats = {
4359 .len =
4360 notif->
4361 size - 1,
4364 IPW_DEBUG_QOS
4365 ("QoS Associate "
4366 "size %d\n",
4367 notif->size);
4368 ieee80211_rx_mgt(priv->
4369 ieee,
4370 (struct
4371 ieee80211_hdr_4addr
4373 &notif->u.raw, &stats);
4376 #endif
4378 schedule_work(&priv->link_up);
4380 break;
4383 case CMAS_AUTHENTICATED:{
4384 if (priv->
4385 status & (STATUS_ASSOCIATED |
4386 STATUS_AUTH)) {
4387 struct notif_authenticate *auth
4388 = &notif->u.auth;
4389 IPW_DEBUG(IPW_DL_NOTIF |
4390 IPW_DL_STATE |
4391 IPW_DL_ASSOC,
4392 "deauthenticated: '%s' "
4393 MAC_FMT
4394 ": (0x%04X) - %s \n",
4395 escape_essid(priv->
4396 essid,
4397 priv->
4398 essid_len),
4399 MAC_ARG(priv->bssid),
4400 ntohs(auth->status),
4401 ipw_get_status_code
4402 (ntohs
4403 (auth->status)));
4405 priv->status &=
4406 ~(STATUS_ASSOCIATING |
4407 STATUS_AUTH |
4408 STATUS_ASSOCIATED);
4410 schedule_work(&priv->link_down);
4411 break;
4414 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4415 IPW_DL_ASSOC,
4416 "authenticated: '%s' " MAC_FMT
4417 "\n",
4418 escape_essid(priv->essid,
4419 priv->essid_len),
4420 MAC_ARG(priv->bssid));
4421 break;
4424 case CMAS_INIT:{
4425 if (priv->status & STATUS_AUTH) {
4426 struct
4427 ieee80211_assoc_response
4428 *resp;
4429 resp =
4430 (struct
4431 ieee80211_assoc_response
4432 *)&notif->u.raw;
4433 IPW_DEBUG(IPW_DL_NOTIF |
4434 IPW_DL_STATE |
4435 IPW_DL_ASSOC,
4436 "association failed (0x%04X): %s\n",
4437 ntohs(resp->status),
4438 ipw_get_status_code
4439 (ntohs
4440 (resp->status)));
4443 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4444 IPW_DL_ASSOC,
4445 "disassociated: '%s' " MAC_FMT
4446 " \n",
4447 escape_essid(priv->essid,
4448 priv->essid_len),
4449 MAC_ARG(priv->bssid));
4451 priv->status &=
4452 ~(STATUS_DISASSOCIATING |
4453 STATUS_ASSOCIATING |
4454 STATUS_ASSOCIATED | STATUS_AUTH);
4455 if (priv->assoc_network
4456 && (priv->assoc_network->
4457 capability &
4458 WLAN_CAPABILITY_IBSS))
4459 ipw_remove_current_network
4460 (priv);
4462 schedule_work(&priv->link_down);
4464 break;
4467 case CMAS_RX_ASSOC_RESP:
4468 break;
4470 default:
4471 IPW_ERROR("assoc: unknown (%d)\n",
4472 assoc->state);
4473 break;
4476 break;
4479 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4480 struct notif_authenticate *auth = &notif->u.auth;
4481 switch (auth->state) {
4482 case CMAS_AUTHENTICATED:
4483 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4484 "authenticated: '%s' " MAC_FMT " \n",
4485 escape_essid(priv->essid,
4486 priv->essid_len),
4487 MAC_ARG(priv->bssid));
4488 priv->status |= STATUS_AUTH;
4489 break;
4491 case CMAS_INIT:
4492 if (priv->status & STATUS_AUTH) {
4493 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4494 IPW_DL_ASSOC,
4495 "authentication failed (0x%04X): %s\n",
4496 ntohs(auth->status),
4497 ipw_get_status_code(ntohs
4498 (auth->
4499 status)));
4501 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4502 IPW_DL_ASSOC,
4503 "deauthenticated: '%s' " MAC_FMT "\n",
4504 escape_essid(priv->essid,
4505 priv->essid_len),
4506 MAC_ARG(priv->bssid));
4508 priv->status &= ~(STATUS_ASSOCIATING |
4509 STATUS_AUTH |
4510 STATUS_ASSOCIATED);
4512 schedule_work(&priv->link_down);
4513 break;
4515 case CMAS_TX_AUTH_SEQ_1:
4516 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4517 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4518 break;
4519 case CMAS_RX_AUTH_SEQ_2:
4520 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4521 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4522 break;
4523 case CMAS_AUTH_SEQ_1_PASS:
4524 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4525 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4526 break;
4527 case CMAS_AUTH_SEQ_1_FAIL:
4528 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4529 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4530 break;
4531 case CMAS_TX_AUTH_SEQ_3:
4532 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4533 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4534 break;
4535 case CMAS_RX_AUTH_SEQ_4:
4536 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4537 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4538 break;
4539 case CMAS_AUTH_SEQ_2_PASS:
4540 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4541 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4542 break;
4543 case CMAS_AUTH_SEQ_2_FAIL:
4544 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4546 break;
4547 case CMAS_TX_ASSOC:
4548 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4549 IPW_DL_ASSOC, "TX_ASSOC\n");
4550 break;
4551 case CMAS_RX_ASSOC_RESP:
4552 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4553 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4555 break;
4556 case CMAS_ASSOCIATED:
4557 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4558 IPW_DL_ASSOC, "ASSOCIATED\n");
4559 break;
4560 default:
4561 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4562 auth->state);
4563 break;
4565 break;
4568 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4569 struct notif_channel_result *x =
4570 &notif->u.channel_result;
4572 if (notif->size == sizeof(*x)) {
4573 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4574 x->channel_num);
4575 } else {
4576 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4577 "(should be %zd)\n",
4578 notif->size, sizeof(*x));
4580 break;
4583 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4584 struct notif_scan_complete *x = &notif->u.scan_complete;
4585 if (notif->size == sizeof(*x)) {
4586 IPW_DEBUG_SCAN
4587 ("Scan completed: type %d, %d channels, "
4588 "%d status\n", x->scan_type,
4589 x->num_channels, x->status);
4590 } else {
4591 IPW_ERROR("Scan completed of wrong size %d "
4592 "(should be %zd)\n",
4593 notif->size, sizeof(*x));
4596 priv->status &=
4597 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4599 wake_up_interruptible(&priv->wait_state);
4600 cancel_delayed_work(&priv->scan_check);
4602 if (priv->status & STATUS_EXIT_PENDING)
4603 break;
4605 priv->ieee->scans++;
4607 #ifdef CONFIG_IPW2200_MONITOR
4608 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4609 priv->status |= STATUS_SCAN_FORCED;
4610 queue_work(priv->workqueue,
4611 &priv->request_scan);
4612 break;
4614 priv->status &= ~STATUS_SCAN_FORCED;
4615 #endif /* CONFIG_IPW2200_MONITOR */
4617 if (!(priv->status & (STATUS_ASSOCIATED |
4618 STATUS_ASSOCIATING |
4619 STATUS_ROAMING |
4620 STATUS_DISASSOCIATING)))
4621 queue_work(priv->workqueue, &priv->associate);
4622 else if (priv->status & STATUS_ROAMING) {
4623 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4624 /* If a scan completed and we are in roam mode, then
4625 * the scan that completed was the one requested as a
4626 * result of entering roam... so, schedule the
4627 * roam work */
4628 queue_work(priv->workqueue,
4629 &priv->roam);
4630 else
4631 /* Don't schedule if we aborted the scan */
4632 priv->status &= ~STATUS_ROAMING;
4633 } else if (priv->status & STATUS_SCAN_PENDING)
4634 queue_work(priv->workqueue,
4635 &priv->request_scan);
4636 else if (priv->config & CFG_BACKGROUND_SCAN
4637 && priv->status & STATUS_ASSOCIATED)
4638 queue_delayed_work(priv->workqueue,
4639 &priv->request_scan, HZ);
4641 /* Send an empty event to user space.
4642 * We don't send the received data on the event because
4643 * it would require us to do complex transcoding, and
4644 * we want to minimise the work done in the irq handler.
4645 * Use a request to extract the data.
4646 * Also, we generate this event for any scan, regardless
4647 * of how the scan was initiated. User space can just
4648 * sync on periodic scan to get fresh data...
4649 * Jean II */
4650 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4651 union iwreq_data wrqu;
4653 wrqu.data.length = 0;
4654 wrqu.data.flags = 0;
4655 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4656 &wrqu, NULL);
4658 break;
4661 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4662 struct notif_frag_length *x = &notif->u.frag_len;
4664 if (notif->size == sizeof(*x))
4665 IPW_ERROR("Frag length: %d\n",
4666 le16_to_cpu(x->frag_length));
4667 else
4668 IPW_ERROR("Frag length of wrong size %d "
4669 "(should be %zd)\n",
4670 notif->size, sizeof(*x));
4671 break;
4674 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4675 struct notif_link_deterioration *x =
4676 &notif->u.link_deterioration;
4678 if (notif->size == sizeof(*x)) {
4679 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4680 "link deterioration: type %d, cnt %d\n",
4681 x->silence_notification_type,
4682 x->silence_count);
4683 memcpy(&priv->last_link_deterioration, x,
4684 sizeof(*x));
4685 } else {
4686 IPW_ERROR("Link Deterioration of wrong size %d "
4687 "(should be %zd)\n",
4688 notif->size, sizeof(*x));
4690 break;
4693 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4694 IPW_ERROR("Dino config\n");
4695 if (priv->hcmd
4696 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4697 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4699 break;
4702 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4703 struct notif_beacon_state *x = &notif->u.beacon_state;
4704 if (notif->size != sizeof(*x)) {
4705 IPW_ERROR
4706 ("Beacon state of wrong size %d (should "
4707 "be %zd)\n", notif->size, sizeof(*x));
4708 break;
4711 if (le32_to_cpu(x->state) ==
4712 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4713 ipw_handle_missed_beacon(priv,
4714 le32_to_cpu(x->
4715 number));
4717 break;
4720 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4721 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4722 if (notif->size == sizeof(*x)) {
4723 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4724 "0x%02x station %d\n",
4725 x->key_state, x->security_type,
4726 x->station_index);
4727 break;
4730 IPW_ERROR
4731 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4732 notif->size, sizeof(*x));
4733 break;
4736 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4737 struct notif_calibration *x = &notif->u.calibration;
4739 if (notif->size == sizeof(*x)) {
4740 memcpy(&priv->calib, x, sizeof(*x));
4741 IPW_DEBUG_INFO("TODO: Calibration\n");
4742 break;
4745 IPW_ERROR
4746 ("Calibration of wrong size %d (should be %zd)\n",
4747 notif->size, sizeof(*x));
4748 break;
4751 case HOST_NOTIFICATION_NOISE_STATS:{
4752 if (notif->size == sizeof(u32)) {
4753 priv->exp_avg_noise =
4754 exponential_average(priv->exp_avg_noise,
4755 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4756 DEPTH_NOISE);
4757 break;
4760 IPW_ERROR
4761 ("Noise stat is wrong size %d (should be %zd)\n",
4762 notif->size, sizeof(u32));
4763 break;
4766 default:
4767 IPW_DEBUG_NOTIF("Unknown notification: "
4768 "subtype=%d,flags=0x%2x,size=%d\n",
4769 notif->subtype, notif->flags, notif->size);
4774 * Destroys all DMA structures and initialises them again
4776 * @param priv
4777 * @return error code
4779 static int ipw_queue_reset(struct ipw_priv *priv)
4781 int rc = 0;
4782 /** @todo customize queue sizes */
4783 int nTx = 64, nTxCmd = 8;
4784 ipw_tx_queue_free(priv);
4785 /* Tx CMD queue */
4786 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4787 IPW_TX_CMD_QUEUE_READ_INDEX,
4788 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4789 IPW_TX_CMD_QUEUE_BD_BASE,
4790 IPW_TX_CMD_QUEUE_BD_SIZE);
4791 if (rc) {
4792 IPW_ERROR("Tx Cmd queue init failed\n");
4793 goto error;
4795 /* Tx queue(s) */
4796 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4797 IPW_TX_QUEUE_0_READ_INDEX,
4798 IPW_TX_QUEUE_0_WRITE_INDEX,
4799 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4800 if (rc) {
4801 IPW_ERROR("Tx 0 queue init failed\n");
4802 goto error;
4804 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4805 IPW_TX_QUEUE_1_READ_INDEX,
4806 IPW_TX_QUEUE_1_WRITE_INDEX,
4807 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4808 if (rc) {
4809 IPW_ERROR("Tx 1 queue init failed\n");
4810 goto error;
4812 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4813 IPW_TX_QUEUE_2_READ_INDEX,
4814 IPW_TX_QUEUE_2_WRITE_INDEX,
4815 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4816 if (rc) {
4817 IPW_ERROR("Tx 2 queue init failed\n");
4818 goto error;
4820 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4821 IPW_TX_QUEUE_3_READ_INDEX,
4822 IPW_TX_QUEUE_3_WRITE_INDEX,
4823 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4824 if (rc) {
4825 IPW_ERROR("Tx 3 queue init failed\n");
4826 goto error;
4828 /* statistics */
4829 priv->rx_bufs_min = 0;
4830 priv->rx_pend_max = 0;
4831 return rc;
4833 error:
4834 ipw_tx_queue_free(priv);
4835 return rc;
4839 * Reclaim Tx queue entries no more used by NIC.
4841 * When FW advances 'R' index, all entries between old and
4842 * new 'R' index need to be reclaimed. As a result, some free space
4843 * forms. If there is enough free space (> low mark), wake Tx queue.
4845 * @note Need to protect against garbage in 'R' index
4846 * @param priv
4847 * @param txq
4848 * @param qindex
4849 * @return Number of used entries remaining in the queue
4851 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4852 struct clx2_tx_queue *txq, int qindex)
4854 u32 hw_tail;
4855 int used;
4856 struct clx2_queue *q = &txq->q;
4858 hw_tail = ipw_read32(priv, q->reg_r);
4859 if (hw_tail >= q->n_bd) {
4860 IPW_ERROR
4861 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4862 hw_tail, q->n_bd);
4863 goto done;
4865 for (; q->last_used != hw_tail;
4866 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4867 ipw_queue_tx_free_tfd(priv, txq);
4868 priv->tx_packets++;
4870 done:
4871 if ((ipw_queue_space(q) > q->low_mark) &&
4872 (qindex >= 0) &&
4873 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4874 netif_wake_queue(priv->net_dev);
4875 used = q->first_empty - q->last_used;
4876 if (used < 0)
4877 used += q->n_bd;
4879 return used;
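/* The reclaim loop above advances last_used toward the firmware's read
 * pointer, wrapping at n_bd, and the number of entries still in flight is
 * the distance from last_used to first_empty modulo the ring size.  An
 * illustrative sketch of that arithmetic (the helper name is made up and
 * is not driver API):
 *
 *   n_bd = 64, last_used = 60, hw_tail = 3
 *     -> TFDs 60, 61, 62, 63, 0, 1, 2 are reclaimed (7 entries)
 *   first_empty = 10, last_used = 3  -> used = 10 - 3 = 7
 *   first_empty = 3,  last_used = 60 -> used = 3 - 60 + 64 = 7
 */
static inline int tx_ring_used_sketch(int first_empty, int last_used, int n_bd)
{
	int used = first_empty - last_used;

	return used < 0 ? used + n_bd : used;
}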
4882 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4883 int len, int sync)
4885 struct clx2_tx_queue *txq = &priv->txq_cmd;
4886 struct clx2_queue *q = &txq->q;
4887 struct tfd_frame *tfd;
4889 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4890 IPW_ERROR("No space for Tx\n");
4891 return -EBUSY;
4894 tfd = &txq->bd[q->first_empty];
4895 txq->txb[q->first_empty] = NULL;
4897 memset(tfd, 0, sizeof(*tfd));
4898 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4899 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4900 priv->hcmd_seq++;
4901 tfd->u.cmd.index = hcmd;
4902 tfd->u.cmd.length = len;
4903 memcpy(tfd->u.cmd.payload, buf, len);
4904 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4905 ipw_write32(priv, q->reg_w, q->first_empty);
4906 _ipw_read32(priv, 0x90);
4908 return 0;
4912 * Rx theory of operation
4914 * The host allocates 32 DMA target addresses and passes the host address
4915 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4916 * 0 to 31
4918 * Rx Queue Indexes
4919 * The host/firmware share two index registers for managing the Rx buffers.
4921 * The READ index maps to the first position that the firmware may be writing
4922 * to -- the driver can read up to (but not including) this position and get
4923 * good data.
4924 * The READ index is managed by the firmware once the card is enabled.
4926 * The WRITE index maps to the last position the driver has read from -- the
4927 * position preceding WRITE is the last slot the firmware can place a packet.
4929 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4930 * WRITE = READ.
4932 * During initialization the host sets up the READ queue position to the first
4933 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4935 * When the firmware places a packet in a buffer it will advance the READ index
4936 * and fire the RX interrupt. The driver can then query the READ index and
4937 * process as many packets as possible, moving the WRITE index forward as it
4938 * resets the Rx queue buffers with new memory.
4940 * The management in the driver is as follows:
4941 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4942 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4943 * to replenish the ipw->rxq->rx_free.
4944 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4945 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4946 * 'processed' and 'read' driver indexes as well)
4947 * + A received packet is processed and handed to the kernel network stack,
4948 * detached from the ipw->rxq. The driver 'processed' index is updated.
4949 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4950 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4951 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4952 * were enough free buffers and RX_STALLED is set it is cleared.
4955 * Driver sequence:
4957 * ipw_rx_queue_alloc() Allocates rx_free
4958 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4959 * ipw_rx_queue_restock
4960 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4961 * queue, updates firmware pointers, and updates
4962 * the WRITE index. If insufficient rx_free buffers
4963 * are available, schedules ipw_rx_queue_replenish
4965 * -- enable interrupts --
4966 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4967 * READ INDEX, detaching the SKB from the pool.
4968 * Moves the packet buffer from queue to rx_used.
4969 * Calls ipw_rx_queue_restock to refill any empty
4970 * slots.
4971 * ...
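/* The index rules above amount to modular arithmetic on a ring of
 * RX_QUEUE_SIZE slots.  As an illustrative sketch (the helpers below are
 * made up for clarity and are not driver API): the queue is "empty" -- no
 * received data for the driver to process -- when WRITE sits one slot
 * behind READ, and "full" -- the firmware has used every slot the driver
 * supplied -- when WRITE equals READ. */
static inline int rx_ring_empty_sketch(u32 read, u32 write)
{
	return write == ((read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE);
}

static inline int rx_ring_full_sketch(u32 read, u32 write)
{
	return write == read;
}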
4976 * If there are slots in the RX queue that need to be restocked,
4977 * and we have free pre-allocated buffers, fill the ranks as much
4978 * as we can, pulling from rx_free.
4980 * This moves the 'write' index forward to catch up with 'processed', and
4981 * also updates the memory address in the firmware to reference the new
4982 * target buffer.
4984 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4986 struct ipw_rx_queue *rxq = priv->rxq;
4987 struct list_head *element;
4988 struct ipw_rx_mem_buffer *rxb;
4989 unsigned long flags;
4990 int write;
4992 spin_lock_irqsave(&rxq->lock, flags);
4993 write = rxq->write;
4994 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4995 element = rxq->rx_free.next;
4996 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4997 list_del(element);
4999 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5000 rxb->dma_addr);
5001 rxq->queue[rxq->write] = rxb;
5002 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5003 rxq->free_count--;
5005 spin_unlock_irqrestore(&rxq->lock, flags);
5007 /* If the pre-allocated buffer pool is dropping low, schedule to
5008 * refill it */
5009 if (rxq->free_count <= RX_LOW_WATERMARK)
5010 queue_work(priv->workqueue, &priv->rx_replenish);
5012 /* If we've added more space for the firmware to place data, tell it */
5013 if (write != rxq->write)
5014 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5018 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5019 * Also restock the Rx queue via ipw_rx_queue_restock.
5021 * This is called as a scheduled work item (except during initialization)
5023 static void ipw_rx_queue_replenish(void *data)
5025 struct ipw_priv *priv = data;
5026 struct ipw_rx_queue *rxq = priv->rxq;
5027 struct list_head *element;
5028 struct ipw_rx_mem_buffer *rxb;
5029 unsigned long flags;
5031 spin_lock_irqsave(&rxq->lock, flags);
5032 while (!list_empty(&rxq->rx_used)) {
5033 element = rxq->rx_used.next;
5034 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5035 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5036 if (!rxb->skb) {
5037 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5038 priv->net_dev->name);
5039 /* We don't reschedule replenish work here -- we will
5040 * call the restock method and if it still needs
5041 * more buffers it will schedule replenish */
5042 break;
5044 list_del(element);
5046 rxb->dma_addr =
5047 pci_map_single(priv->pci_dev, rxb->skb->data,
5048 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5050 list_add_tail(&rxb->list, &rxq->rx_free);
5051 rxq->free_count++;
5053 spin_unlock_irqrestore(&rxq->lock, flags);
5055 ipw_rx_queue_restock(priv);
5058 static void ipw_bg_rx_queue_replenish(void *data)
5060 struct ipw_priv *priv = data;
5061 mutex_lock(&priv->mutex);
5062 ipw_rx_queue_replenish(data);
5063 mutex_unlock(&priv->mutex);
5066 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5067 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5068 * This free routine walks the list of POOL entries and if SKB is set to
5069 * non NULL it is unmapped and freed
5071 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5073 int i;
5075 if (!rxq)
5076 return;
5078 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5079 if (rxq->pool[i].skb != NULL) {
5080 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5081 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5082 dev_kfree_skb(rxq->pool[i].skb);
5086 kfree(rxq);
5089 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5091 struct ipw_rx_queue *rxq;
5092 int i;
5094 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5095 if (unlikely(!rxq)) {
5096 IPW_ERROR("memory allocation failed\n");
5097 return NULL;
5099 spin_lock_init(&rxq->lock);
5100 INIT_LIST_HEAD(&rxq->rx_free);
5101 INIT_LIST_HEAD(&rxq->rx_used);
5103 /* Fill the rx_used queue with _all_ of the Rx buffers */
5104 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5105 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5107 /* Set up the queue so that we have processed and used all buffers, but have
5108 * not restocked the Rx queue with fresh buffers */
5109 rxq->read = rxq->write = 0;
5110 rxq->processed = RX_QUEUE_SIZE - 1;
5111 rxq->free_count = 0;
5113 return rxq;
5116 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5118 rate &= ~IEEE80211_BASIC_RATE_MASK;
5119 if (ieee_mode == IEEE_A) {
5120 switch (rate) {
5121 case IEEE80211_OFDM_RATE_6MB:
5122 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5123 1 : 0;
5124 case IEEE80211_OFDM_RATE_9MB:
5125 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5126 1 : 0;
5127 case IEEE80211_OFDM_RATE_12MB:
5128 return priv->
5129 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5130 case IEEE80211_OFDM_RATE_18MB:
5131 return priv->
5132 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5133 case IEEE80211_OFDM_RATE_24MB:
5134 return priv->
5135 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5136 case IEEE80211_OFDM_RATE_36MB:
5137 return priv->
5138 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5139 case IEEE80211_OFDM_RATE_48MB:
5140 return priv->
5141 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5142 case IEEE80211_OFDM_RATE_54MB:
5143 return priv->
5144 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5145 default:
5146 return 0;
5150 /* B and G mixed */
5151 switch (rate) {
5152 case IEEE80211_CCK_RATE_1MB:
5153 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5154 case IEEE80211_CCK_RATE_2MB:
5155 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5156 case IEEE80211_CCK_RATE_5MB:
5157 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5158 case IEEE80211_CCK_RATE_11MB:
5159 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5162 /* If we are limited to B modulations, bail at this point */
5163 if (ieee_mode == IEEE_B)
5164 return 0;
5166 /* G */
5167 switch (rate) {
5168 case IEEE80211_OFDM_RATE_6MB:
5169 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5170 case IEEE80211_OFDM_RATE_9MB:
5171 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5172 case IEEE80211_OFDM_RATE_12MB:
5173 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5174 case IEEE80211_OFDM_RATE_18MB:
5175 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5176 case IEEE80211_OFDM_RATE_24MB:
5177 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5178 case IEEE80211_OFDM_RATE_36MB:
5179 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5180 case IEEE80211_OFDM_RATE_48MB:
5181 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5182 case IEEE80211_OFDM_RATE_54MB:
5183 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5186 return 0;
5189 static int ipw_compatible_rates(struct ipw_priv *priv,
5190 const struct ieee80211_network *network,
5191 struct ipw_supported_rates *rates)
5193 int num_rates, i;
5195 memset(rates, 0, sizeof(*rates));
5196 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5197 rates->num_rates = 0;
5198 for (i = 0; i < num_rates; i++) {
5199 if (!ipw_is_rate_in_mask(priv, network->mode,
5200 network->rates[i])) {
5202 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5203 IPW_DEBUG_SCAN("Adding masked mandatory "
5204 "rate %02X\n",
5205 network->rates[i]);
5206 rates->supported_rates[rates->num_rates++] =
5207 network->rates[i];
5208 continue;
5211 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5212 network->rates[i], priv->rates_mask);
5213 continue;
5216 rates->supported_rates[rates->num_rates++] = network->rates[i];
5219 num_rates = min(network->rates_ex_len,
5220 (u8) (IPW_MAX_RATES - num_rates));
5221 for (i = 0; i < num_rates; i++) {
5222 if (!ipw_is_rate_in_mask(priv, network->mode,
5223 network->rates_ex[i])) {
5224 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5225 IPW_DEBUG_SCAN("Adding masked mandatory "
5226 "rate %02X\n",
5227 network->rates_ex[i]);
5228 rates->supported_rates[rates->num_rates++] =
5229 network->rates[i];
5230 continue;
5233 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5234 network->rates_ex[i], priv->rates_mask);
5235 continue;
5238 rates->supported_rates[rates->num_rates++] =
5239 network->rates_ex[i];
5242 return 1;
5245 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5246 const struct ipw_supported_rates *src)
5248 u8 i;
5249 for (i = 0; i < src->num_rates; i++)
5250 dest->supported_rates[i] = src->supported_rates[i];
5251 dest->num_rates = src->num_rates;
5254 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5255 * mask should ever be used -- right now all callers to add the scan rates are
5256 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5257 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5258 u8 modulation, u32 rate_mask)
5260 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5261 IEEE80211_BASIC_RATE_MASK : 0;
5263 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5264 rates->supported_rates[rates->num_rates++] =
5265 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5267 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5268 rates->supported_rates[rates->num_rates++] =
5269 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5271 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5272 rates->supported_rates[rates->num_rates++] = basic_mask |
5273 IEEE80211_CCK_RATE_5MB;
5275 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5276 rates->supported_rates[rates->num_rates++] = basic_mask |
5277 IEEE80211_CCK_RATE_11MB;
5280 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5281 u8 modulation, u32 rate_mask)
5283 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5284 IEEE80211_BASIC_RATE_MASK : 0;
5286 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5287 rates->supported_rates[rates->num_rates++] = basic_mask |
5288 IEEE80211_OFDM_RATE_6MB;
5290 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5291 rates->supported_rates[rates->num_rates++] =
5292 IEEE80211_OFDM_RATE_9MB;
5294 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5295 rates->supported_rates[rates->num_rates++] = basic_mask |
5296 IEEE80211_OFDM_RATE_12MB;
5298 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5299 rates->supported_rates[rates->num_rates++] =
5300 IEEE80211_OFDM_RATE_18MB;
5302 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5303 rates->supported_rates[rates->num_rates++] = basic_mask |
5304 IEEE80211_OFDM_RATE_24MB;
5306 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5307 rates->supported_rates[rates->num_rates++] =
5308 IEEE80211_OFDM_RATE_36MB;
5310 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5311 rates->supported_rates[rates->num_rates++] =
5312 IEEE80211_OFDM_RATE_48MB;
5314 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5315 rates->supported_rates[rates->num_rates++] =
5316 IEEE80211_OFDM_RATE_54MB;
5319 struct ipw_network_match {
5320 struct ieee80211_network *network;
5321 struct ipw_supported_rates rates;
5324 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5325 struct ipw_network_match *match,
5326 struct ieee80211_network *network,
5327 int roaming)
5329 struct ipw_supported_rates rates;
5331 /* Verify that this network's capability is compatible with the
5332 * current mode (AdHoc or Infrastructure) */
5333 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5334 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5335 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5336 "capability mismatch.\n",
5337 escape_essid(network->ssid, network->ssid_len),
5338 MAC_ARG(network->bssid));
5339 return 0;
5342 /* If we do not have an ESSID for this AP, we can not associate with
5343 * it */
5344 if (network->flags & NETWORK_EMPTY_ESSID) {
5345 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5346 "because of hidden ESSID.\n",
5347 escape_essid(network->ssid, network->ssid_len),
5348 MAC_ARG(network->bssid));
5349 return 0;
5352 if (unlikely(roaming)) {
5353 /* If we are roaming, then ensure this is a valid
5354 * network to try to roam to */
5355 if ((network->ssid_len != match->network->ssid_len) ||
5356 memcmp(network->ssid, match->network->ssid,
5357 network->ssid_len)) {
5358 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5359 "because of non-network ESSID.\n",
5360 escape_essid(network->ssid,
5361 network->ssid_len),
5362 MAC_ARG(network->bssid));
5363 return 0;
5365 } else {
5366 /* If an ESSID has been configured then compare the broadcast
5367 * ESSID to ours */
5368 if ((priv->config & CFG_STATIC_ESSID) &&
5369 ((network->ssid_len != priv->essid_len) ||
5370 memcmp(network->ssid, priv->essid,
5371 min(network->ssid_len, priv->essid_len)))) {
5372 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5374 strncpy(escaped,
5375 escape_essid(network->ssid, network->ssid_len),
5376 sizeof(escaped));
5377 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5378 "because of ESSID mismatch: '%s'.\n",
5379 escaped, MAC_ARG(network->bssid),
5380 escape_essid(priv->essid,
5381 priv->essid_len));
5382 return 0;
5386 /* If the currently matched network has a larger TSF timestamp than this
5387 * one, don't bother testing everything else. */
5389 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5390 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5391 "current network.\n",
5392 escape_essid(match->network->ssid,
5393 match->network->ssid_len));
5394 return 0;
5395 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5396 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5397 "current network.\n",
5398 escape_essid(match->network->ssid,
5399 match->network->ssid_len));
5400 return 0;
5403 /* Now go through and see if the requested network is valid... */
5404 if (priv->ieee->scan_age != 0 &&
5405 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5406 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5407 "because of age: %ums.\n",
5408 escape_essid(network->ssid, network->ssid_len),
5409 MAC_ARG(network->bssid),
5410 jiffies_to_msecs(jiffies -
5411 network->last_scanned));
5412 return 0;
5415 if ((priv->config & CFG_STATIC_CHANNEL) &&
5416 (network->channel != priv->channel)) {
5417 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5418 "because of channel mismatch: %d != %d.\n",
5419 escape_essid(network->ssid, network->ssid_len),
5420 MAC_ARG(network->bssid),
5421 network->channel, priv->channel);
5422 return 0;
5425 /* Verify privacy compatibility */
5426 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5427 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5428 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5429 "because of privacy mismatch: %s != %s.\n",
5430 escape_essid(network->ssid, network->ssid_len),
5431 MAC_ARG(network->bssid),
5432 priv->
5433 capability & CAP_PRIVACY_ON ? "on" : "off",
5434 network->
5435 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5436 "off");
5437 return 0;
5440 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5441 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5442 "because of the same BSSID match: " MAC_FMT
5443 ".\n", escape_essid(network->ssid,
5444 network->ssid_len),
5445 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5446 return 0;
5449 /* Filter out any incompatible freq / mode combinations */
5450 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5451 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5452 "because of invalid frequency/mode "
5453 "combination.\n",
5454 escape_essid(network->ssid, network->ssid_len),
5455 MAC_ARG(network->bssid));
5456 return 0;
5459 /* Ensure that the rates supported by the driver are compatible with
5460 * this AP, including verification of basic rates (mandatory) */
5461 if (!ipw_compatible_rates(priv, network, &rates)) {
5462 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5463 "because configured rate mask excludes "
5464 "AP mandatory rate.\n",
5465 escape_essid(network->ssid, network->ssid_len),
5466 MAC_ARG(network->bssid));
5467 return 0;
5470 if (rates.num_rates == 0) {
5471 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5472 "because of no compatible rates.\n",
5473 escape_essid(network->ssid, network->ssid_len),
5474 MAC_ARG(network->bssid));
5475 return 0;
5478 /* TODO: Perform any further minimal comparative tests. We do not
5479 * want to put too much policy logic here; intelligent scan selection
5480 * should occur within a generic IEEE 802.11 user space tool. */
5482 /* Set up 'new' AP to this network */
5483 ipw_copy_rates(&match->rates, &rates);
5484 match->network = network;
5485 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5486 escape_essid(network->ssid, network->ssid_len),
5487 MAC_ARG(network->bssid));
5489 return 1;
5492 static void ipw_merge_adhoc_network(void *data)
5494 struct ipw_priv *priv = data;
5495 struct ieee80211_network *network = NULL;
5496 struct ipw_network_match match = {
5497 .network = priv->assoc_network
5500 if ((priv->status & STATUS_ASSOCIATED) &&
5501 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5502 /* First pass through ROAM process -- look for a better
5503 * network */
5504 unsigned long flags;
5506 spin_lock_irqsave(&priv->ieee->lock, flags);
5507 list_for_each_entry(network, &priv->ieee->network_list, list) {
5508 if (network != priv->assoc_network)
5509 ipw_find_adhoc_network(priv, &match, network,
5512 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5514 if (match.network == priv->assoc_network) {
5515 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5516 "merge to.\n");
5517 return;
5520 mutex_lock(&priv->mutex);
5521 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5522 IPW_DEBUG_MERGE("remove network %s\n",
5523 escape_essid(priv->essid,
5524 priv->essid_len));
5525 ipw_remove_current_network(priv);
5528 ipw_disassociate(priv);
5529 priv->assoc_network = match.network;
5530 mutex_unlock(&priv->mutex);
5531 return;
5535 static int ipw_best_network(struct ipw_priv *priv,
5536 struct ipw_network_match *match,
5537 struct ieee80211_network *network, int roaming)
5539 struct ipw_supported_rates rates;
5541 /* Verify that this network's capability is compatible with the
5542 * current mode (AdHoc or Infrastructure) */
5543 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5544 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5545 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5546 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5548 "capability mismatch.\n",
5549 escape_essid(network->ssid, network->ssid_len),
5550 MAC_ARG(network->bssid));
5551 return 0;
5554 /* If we do not have an ESSID for this AP, we can not associate with
5555 * it */
5556 if (network->flags & NETWORK_EMPTY_ESSID) {
5557 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5558 "because of hidden ESSID.\n",
5559 escape_essid(network->ssid, network->ssid_len),
5560 MAC_ARG(network->bssid));
5561 return 0;
5564 if (unlikely(roaming)) {
5565 /* If we are roaming, then ensure this is a valid
5566 * network to try to roam to */
5567 if ((network->ssid_len != match->network->ssid_len) ||
5568 memcmp(network->ssid, match->network->ssid,
5569 network->ssid_len)) {
5570 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5571 "because of non-network ESSID.\n",
5572 escape_essid(network->ssid,
5573 network->ssid_len),
5574 MAC_ARG(network->bssid));
5575 return 0;
5577 } else {
5578 /* If an ESSID has been configured then compare the broadcast
5579 * ESSID to ours */
5580 if ((priv->config & CFG_STATIC_ESSID) &&
5581 ((network->ssid_len != priv->essid_len) ||
5582 memcmp(network->ssid, priv->essid,
5583 min(network->ssid_len, priv->essid_len)))) {
5584 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5585 strncpy(escaped,
5586 escape_essid(network->ssid, network->ssid_len),
5587 sizeof(escaped));
5588 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5589 "because of ESSID mismatch: '%s'.\n",
5590 escaped, MAC_ARG(network->bssid),
5591 escape_essid(priv->essid,
5592 priv->essid_len));
5593 return 0;
5597 /* If the currently matched network has a stronger signal than this one,
5598 * don't bother testing everything else. */
5599 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5600 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5601 strncpy(escaped,
5602 escape_essid(network->ssid, network->ssid_len),
5603 sizeof(escaped));
5604 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5605 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5606 escaped, MAC_ARG(network->bssid),
5607 escape_essid(match->network->ssid,
5608 match->network->ssid_len),
5609 MAC_ARG(match->network->bssid));
5610 return 0;
5613 /* If this network has already had an association attempt within the
5614 * last 3 seconds, do not try and associate again... */
5615 if (network->last_associate &&
5616 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5617 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5618 "because of storming (%ums since last "
5619 "assoc attempt).\n",
5620 escape_essid(network->ssid, network->ssid_len),
5621 MAC_ARG(network->bssid),
5622 jiffies_to_msecs(jiffies -
5623 network->last_associate));
5624 return 0;
5627 /* Now go through and see if the requested network is valid... */
5628 if (priv->ieee->scan_age != 0 &&
5629 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5630 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5631 "because of age: %ums.\n",
5632 escape_essid(network->ssid, network->ssid_len),
5633 MAC_ARG(network->bssid),
5634 jiffies_to_msecs(jiffies -
5635 network->last_scanned));
5636 return 0;
5639 if ((priv->config & CFG_STATIC_CHANNEL) &&
5640 (network->channel != priv->channel)) {
5641 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5642 "because of channel mismatch: %d != %d.\n",
5643 escape_essid(network->ssid, network->ssid_len),
5644 MAC_ARG(network->bssid),
5645 network->channel, priv->channel);
5646 return 0;
5649 /* Verify privacy compatibility */
5650 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5651 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5652 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5653 "because of privacy mismatch: %s != %s.\n",
5654 escape_essid(network->ssid, network->ssid_len),
5655 MAC_ARG(network->bssid),
5656 priv->capability & CAP_PRIVACY_ON ? "on" :
5657 "off",
5658 network->capability &
5659 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5660 return 0;
5663 if ((priv->config & CFG_STATIC_BSSID) &&
5664 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5665 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5666 "because of BSSID mismatch: " MAC_FMT ".\n",
5667 escape_essid(network->ssid, network->ssid_len),
5668 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5669 return 0;
5672 /* Filter out any incompatible freq / mode combinations */
5673 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5674 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5675 "because of invalid frequency/mode "
5676 "combination.\n",
5677 escape_essid(network->ssid, network->ssid_len),
5678 MAC_ARG(network->bssid));
5679 return 0;
5682 /* Filter out invalid channel in current GEO */
5683 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5684 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5685 "because of invalid channel in current GEO\n",
5686 escape_essid(network->ssid, network->ssid_len),
5687 MAC_ARG(network->bssid));
5688 return 0;
5691 /* Ensure that the rates supported by the driver are compatible with
5692 * this AP, including verification of basic rates (mandatory) */
5693 if (!ipw_compatible_rates(priv, network, &rates)) {
5694 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5695 "because configured rate mask excludes "
5696 "AP mandatory rate.\n",
5697 escape_essid(network->ssid, network->ssid_len),
5698 MAC_ARG(network->bssid));
5699 return 0;
5702 if (rates.num_rates == 0) {
5703 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5704 "because of no compatible rates.\n",
5705 escape_essid(network->ssid, network->ssid_len),
5706 MAC_ARG(network->bssid));
5707 return 0;
5710 /* TODO: Perform any further minimal comparative tests. We do not
5711 * want to put too much policy logic here; intelligent scan selection
5712 * should occur within a generic IEEE 802.11 user space tool. */
5714 /* Set up 'new' AP to this network */
5715 ipw_copy_rates(&match->rates, &rates);
5716 match->network = network;
5718 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5719 escape_essid(network->ssid, network->ssid_len),
5720 MAC_ARG(network->bssid));
5722 return 1;
5725 static void ipw_adhoc_create(struct ipw_priv *priv,
5726 struct ieee80211_network *network)
5728 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5729 int i;
5732 * For the purposes of scanning, we can set our wireless mode
5733 * to trigger scans across combinations of bands, but when it
5734 * comes to creating a new ad-hoc network, we have to tell the FW
5735 * exactly which band to use.
5737 * We also have the possibility of an invalid channel for the
5738 * chosen band. Attempting to create a new ad-hoc network
5739 * with an invalid channel for wireless mode will trigger a
5740 * FW fatal error.
5743 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5744 case IEEE80211_52GHZ_BAND:
5745 network->mode = IEEE_A;
5746 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5747 BUG_ON(i == -1);
5748 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5749 IPW_WARNING("Overriding invalid channel\n");
5750 priv->channel = geo->a[0].channel;
5752 break;
5754 case IEEE80211_24GHZ_BAND:
5755 if (priv->ieee->mode & IEEE_G)
5756 network->mode = IEEE_G;
5757 else
5758 network->mode = IEEE_B;
5759 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5760 BUG_ON(i == -1);
5761 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5762 IPW_WARNING("Overriding invalid channel\n");
5763 priv->channel = geo->bg[0].channel;
5765 break;
5767 default:
5768 IPW_WARNING("Overriding invalid channel\n");
5769 if (priv->ieee->mode & IEEE_A) {
5770 network->mode = IEEE_A;
5771 priv->channel = geo->a[0].channel;
5772 } else if (priv->ieee->mode & IEEE_G) {
5773 network->mode = IEEE_G;
5774 priv->channel = geo->bg[0].channel;
5775 } else {
5776 network->mode = IEEE_B;
5777 priv->channel = geo->bg[0].channel;
5779 break;
5782 network->channel = priv->channel;
5783 priv->config |= CFG_ADHOC_PERSIST;
5784 ipw_create_bssid(priv, network->bssid);
5785 network->ssid_len = priv->essid_len;
5786 memcpy(network->ssid, priv->essid, priv->essid_len);
5787 memset(&network->stats, 0, sizeof(network->stats));
5788 network->capability = WLAN_CAPABILITY_IBSS;
5789 if (!(priv->config & CFG_PREAMBLE_LONG))
5790 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5791 if (priv->capability & CAP_PRIVACY_ON)
5792 network->capability |= WLAN_CAPABILITY_PRIVACY;
5793 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5794 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5795 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5796 memcpy(network->rates_ex,
5797 &priv->rates.supported_rates[network->rates_len],
5798 network->rates_ex_len);
5799 network->last_scanned = 0;
5800 network->flags = 0;
5801 network->last_associate = 0;
5802 network->time_stamp[0] = 0;
5803 network->time_stamp[1] = 0;
5804 network->beacon_interval = 100; /* Default */
5805 network->listen_interval = 10; /* Default */
5806 network->atim_window = 0; /* Default */
5807 network->wpa_ie_len = 0;
5808 network->rsn_ie_len = 0;
5811 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5813 struct ipw_tgi_tx_key key;
5815 if (!(priv->ieee->sec.flags & (1 << index)))
5816 return;
5818 key.key_id = index;
5819 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5820 key.security_type = type;
5821 key.station_index = 0; /* always 0 for BSS */
5822 key.flags = 0;
5823 /* 0 for new key; previous value of counter (after fatal error) */
5824 key.tx_counter[0] = cpu_to_le32(0);
5825 key.tx_counter[1] = cpu_to_le32(0);
5827 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5830 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5832 struct ipw_wep_key key;
5833 int i;
5835 key.cmd_id = DINO_CMD_WEP_KEY;
5836 key.seq_num = 0;
5838 /* Note: AES keys cannot be set multiple times.
5839 * Only set them the first time. */
5840 for (i = 0; i < 4; i++) {
5841 key.key_index = i | type;
5842 if (!(priv->ieee->sec.flags & (1 << i))) {
5843 key.key_size = 0;
5844 continue;
5847 key.key_size = priv->ieee->sec.key_sizes[i];
5848 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5850 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5854 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5856 if (priv->ieee->host_encrypt)
5857 return;
5859 switch (level) {
5860 case SEC_LEVEL_3:
5861 priv->sys_config.disable_unicast_decryption = 0;
5862 priv->ieee->host_decrypt = 0;
5863 break;
5864 case SEC_LEVEL_2:
5865 priv->sys_config.disable_unicast_decryption = 1;
5866 priv->ieee->host_decrypt = 1;
5867 break;
5868 case SEC_LEVEL_1:
5869 priv->sys_config.disable_unicast_decryption = 0;
5870 priv->ieee->host_decrypt = 0;
5871 break;
5872 case SEC_LEVEL_0:
5873 priv->sys_config.disable_unicast_decryption = 1;
5874 break;
5875 default:
5876 break;
5880 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5882 if (priv->ieee->host_encrypt)
5883 return;
5885 switch (level) {
5886 case SEC_LEVEL_3:
5887 priv->sys_config.disable_multicast_decryption = 0;
5888 break;
5889 case SEC_LEVEL_2:
5890 priv->sys_config.disable_multicast_decryption = 1;
5891 break;
5892 case SEC_LEVEL_1:
5893 priv->sys_config.disable_multicast_decryption = 0;
5894 break;
5895 case SEC_LEVEL_0:
5896 priv->sys_config.disable_multicast_decryption = 1;
5897 break;
5898 default:
5899 break;
5903 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5905 switch (priv->ieee->sec.level) {
5906 case SEC_LEVEL_3:
5907 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5908 ipw_send_tgi_tx_key(priv,
5909 DCT_FLAG_EXT_SECURITY_CCM,
5910 priv->ieee->sec.active_key);
5912 if (!priv->ieee->host_mc_decrypt)
5913 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5914 break;
5915 case SEC_LEVEL_2:
5916 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5917 ipw_send_tgi_tx_key(priv,
5918 DCT_FLAG_EXT_SECURITY_TKIP,
5919 priv->ieee->sec.active_key);
5920 break;
5921 case SEC_LEVEL_1:
5922 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5923 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5924 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5925 break;
5926 case SEC_LEVEL_0:
5927 default:
5928 break;
5932 static void ipw_adhoc_check(void *data)
5934 struct ipw_priv *priv = data;
5936 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5937 !(priv->config & CFG_ADHOC_PERSIST)) {
5938 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5939 IPW_DL_STATE | IPW_DL_ASSOC,
5940 "Missed beacon: %d - disassociate\n",
5941 priv->missed_adhoc_beacons);
5942 ipw_remove_current_network(priv);
5943 ipw_disassociate(priv);
5944 return;
5947 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5948 priv->assoc_request.beacon_interval);
5951 static void ipw_bg_adhoc_check(void *data)
5953 struct ipw_priv *priv = data;
5954 mutex_lock(&priv->mutex);
5955 ipw_adhoc_check(data);
5956 mutex_unlock(&priv->mutex);
5959 static void ipw_debug_config(struct ipw_priv *priv)
5961 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5962 "[CFG 0x%08X]\n", priv->config);
5963 if (priv->config & CFG_STATIC_CHANNEL)
5964 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5965 else
5966 IPW_DEBUG_INFO("Channel unlocked.\n");
5967 if (priv->config & CFG_STATIC_ESSID)
5968 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5969 escape_essid(priv->essid, priv->essid_len));
5970 else
5971 IPW_DEBUG_INFO("ESSID unlocked.\n");
5972 if (priv->config & CFG_STATIC_BSSID)
5973 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5974 MAC_ARG(priv->bssid));
5975 else
5976 IPW_DEBUG_INFO("BSSID unlocked.\n");
5977 if (priv->capability & CAP_PRIVACY_ON)
5978 IPW_DEBUG_INFO("PRIVACY on\n");
5979 else
5980 IPW_DEBUG_INFO("PRIVACY off\n");
5981 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
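/* Convert the user's fixed-rate mask (priv->rates_mask) into the
 * firmware's per-band representation and write it through the
 * fixed-rate override pointer read from IPW_MEM_FIXED_OVERRIDE.
 * A mask that is invalid for the current band is replaced by 0. */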
5984 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5986 /* TODO: Verify that this works... */
5987 struct ipw_fixed_rate fr = {
5988 .tx_rates = priv->rates_mask
5990 u32 reg;
5991 u16 mask = 0;
5993 /* Identify 'current FW band' and match it with the fixed
5994 * Tx rates */
5996 switch (priv->ieee->freq_band) {
5997 case IEEE80211_52GHZ_BAND: /* A only */
5998 /* IEEE_A */
5999 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6000 /* Invalid fixed rate mask */
6001 IPW_DEBUG_WX
6002 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6003 fr.tx_rates = 0;
6004 break;
6007 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6008 break;
6010 default: /* 2.4GHz or Mixed */
6011 /* IEEE_B */
6012 if (mode == IEEE_B) {
6013 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6014 /* Invalid fixed rate mask */
6015 IPW_DEBUG_WX
6016 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6017 fr.tx_rates = 0;
6019 break;
6022 /* IEEE_G */
6023 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6024 IEEE80211_OFDM_RATES_MASK)) {
6025 /* Invalid fixed rate mask */
6026 IPW_DEBUG_WX
6027 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6028 fr.tx_rates = 0;
6029 break;
6032 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6033 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6034 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6037 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6038 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6039 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6042 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6043 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6044 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6047 fr.tx_rates |= mask;
6048 break;
6051 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6052 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6055 static void ipw_abort_scan(struct ipw_priv *priv)
6057 int err;
6059 if (priv->status & STATUS_SCAN_ABORTING) {
6060 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6061 return;
6063 priv->status |= STATUS_SCAN_ABORTING;
6065 err = ipw_send_scan_abort(priv);
6066 if (err)
6067 IPW_DEBUG_HC("Request to abort scan failed.\n");
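/* Build the channel list of a scan request.  For each band the entry at
 * 'start' encodes (band << 6) | channel count, followed by the channel
 * numbers; passive-only channels are forced to a passive dwell and the
 * channel we are currently associated on is skipped.  With
 * CFG_SPEED_SCAN the 2.4GHz list is taken round-robin from
 * priv->speed_scan. */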
6070 static void ipw_add_scan_channels(struct ipw_priv *priv,
6071 struct ipw_scan_request_ext *scan,
6072 int scan_type)
6074 int channel_index = 0;
6075 const struct ieee80211_geo *geo;
6076 int i;
6078 geo = ieee80211_get_geo(priv->ieee);
6080 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6081 int start = channel_index;
6082 for (i = 0; i < geo->a_channels; i++) {
6083 if ((priv->status & STATUS_ASSOCIATED) &&
6084 geo->a[i].channel == priv->channel)
6085 continue;
6086 channel_index++;
6087 scan->channels_list[channel_index] = geo->a[i].channel;
6088 ipw_set_scan_type(scan, channel_index,
6089 geo->a[i].
6090 flags & IEEE80211_CH_PASSIVE_ONLY ?
6091 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6092 scan_type);
6095 if (start != channel_index) {
6096 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6097 (channel_index - start);
6098 channel_index++;
6102 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6103 int start = channel_index;
6104 if (priv->config & CFG_SPEED_SCAN) {
6105 int index;
6106 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6107 /* nop out the list */
6108 [0] = 0
6111 u8 channel;
6112 while (channel_index < IPW_SCAN_CHANNELS) {
6113 channel =
6114 priv->speed_scan[priv->speed_scan_pos];
6115 if (channel == 0) {
6116 priv->speed_scan_pos = 0;
6117 channel = priv->speed_scan[0];
6119 if ((priv->status & STATUS_ASSOCIATED) &&
6120 channel == priv->channel) {
6121 priv->speed_scan_pos++;
6122 continue;
6125 /* If this channel has already been
6126 * added to the scan, break out of the
6127 * loop; it will be the first channel
6128 * in the next scan.
6130 if (channels[channel - 1] != 0)
6131 break;
6133 channels[channel - 1] = 1;
6134 priv->speed_scan_pos++;
6135 channel_index++;
6136 scan->channels_list[channel_index] = channel;
6137 index =
6138 ieee80211_channel_to_index(priv->ieee, channel);
6139 ipw_set_scan_type(scan, channel_index,
6140 geo->bg[index].
6141 flags &
6142 IEEE80211_CH_PASSIVE_ONLY ?
6143 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6144 : scan_type);
6146 } else {
6147 for (i = 0; i < geo->bg_channels; i++) {
6148 if ((priv->status & STATUS_ASSOCIATED) &&
6149 geo->bg[i].channel == priv->channel)
6150 continue;
6151 channel_index++;
6152 scan->channels_list[channel_index] =
6153 geo->bg[i].channel;
6154 ipw_set_scan_type(scan, channel_index,
6155 geo->bg[i].
6156 flags &
6157 IEEE80211_CH_PASSIVE_ONLY ?
6158 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6159 : scan_type);
6163 if (start != channel_index) {
6164 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6165 (channel_index - start);
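/* Core scan request path.  Bails out (setting STATUS_SCAN_PENDING) if a
 * scan is already running, an abort is pending or RF-kill is active.
 * Otherwise it fills an ipw_scan_request_ext: a single-channel passive
 * scan in monitor mode, or an active broadcast scan (directed at the
 * current ESSID when roaming, or on every other scan with a static
 * ESSID while unassociated), sends it and arms the scan watchdog. */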
6170 static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6172 struct ipw_scan_request_ext scan;
6173 int err = 0, scan_type;
6175 if (!(priv->status & STATUS_INIT) ||
6176 (priv->status & STATUS_EXIT_PENDING))
6177 return 0;
6179 mutex_lock(&priv->mutex);
6181 if (priv->status & STATUS_SCANNING) {
6182 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6183 priv->status |= STATUS_SCAN_PENDING;
6184 goto done;
6187 if (!(priv->status & STATUS_SCAN_FORCED) &&
6188 priv->status & STATUS_SCAN_ABORTING) {
6189 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6190 priv->status |= STATUS_SCAN_PENDING;
6191 goto done;
6194 if (priv->status & STATUS_RF_KILL_MASK) {
6195 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6196 priv->status |= STATUS_SCAN_PENDING;
6197 goto done;
6200 memset(&scan, 0, sizeof(scan));
6201 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6203 if (type == IW_SCAN_TYPE_PASSIVE) {
6204 IPW_DEBUG_WX("use passive scanning\n");
6205 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6206 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6207 cpu_to_le16(120);
6208 ipw_add_scan_channels(priv, &scan, scan_type);
6209 goto send_request;
6212 /* Use active scan by default. */
6213 if (priv->config & CFG_SPEED_SCAN)
6214 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6215 cpu_to_le16(30);
6216 else
6217 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6218 cpu_to_le16(20);
6220 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6221 cpu_to_le16(20);
6223 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6225 #ifdef CONFIG_IPW2200_MONITOR
6226 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6227 u8 channel;
6228 u8 band = 0;
6230 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6231 case IEEE80211_52GHZ_BAND:
6232 band = (u8) (IPW_A_MODE << 6) | 1;
6233 channel = priv->channel;
6234 break;
6236 case IEEE80211_24GHZ_BAND:
6237 band = (u8) (IPW_B_MODE << 6) | 1;
6238 channel = priv->channel;
6239 break;
6241 default:
6242 band = (u8) (IPW_B_MODE << 6) | 1;
6243 channel = 9;
6244 break;
6247 scan.channels_list[0] = band;
6248 scan.channels_list[1] = channel;
6249 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6251 /* NOTE: The card will sit on this channel for this time
6252 * period. Scan aborts are timing sensitive and frequently
6253 * result in firmware restarts. As such, it is best to
6254 * set a small dwell_time here and just keep re-issuing
6255 * scans. Otherwise fast channel hopping will not actually
6256 * hop channels.
6258 * TODO: Move SPEED SCAN support to all modes and bands */
6259 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6260 cpu_to_le16(2000);
6261 } else {
6262 #endif /* CONFIG_IPW2200_MONITOR */
6263 /* If we are roaming, then make this a directed scan for the
6264 * current network. Otherwise, ensure that every other scan
6265 * is a fast channel hop scan */
6266 if ((priv->status & STATUS_ROAMING)
6267 || (!(priv->status & STATUS_ASSOCIATED)
6268 && (priv->config & CFG_STATIC_ESSID)
6269 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6270 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6271 if (err) {
6272 IPW_DEBUG_HC("Attempt to send SSID command "
6273 "failed.\n");
6274 goto done;
6277 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6278 } else
6279 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6281 ipw_add_scan_channels(priv, &scan, scan_type);
6282 #ifdef CONFIG_IPW2200_MONITOR
6284 #endif
6286 send_request:
6287 err = ipw_send_scan_request_ext(priv, &scan);
6288 if (err) {
6289 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6290 goto done;
6293 priv->status |= STATUS_SCANNING;
6294 priv->status &= ~STATUS_SCAN_PENDING;
6295 queue_delayed_work(priv->workqueue, &priv->scan_check,
6296 IPW_SCAN_CHECK_WATCHDOG);
6297 done:
6298 mutex_unlock(&priv->mutex);
6299 return err;
6302 static int ipw_request_passive_scan(struct ipw_priv *priv) {
6303 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6306 static int ipw_request_scan(struct ipw_priv *priv) {
6307 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6310 static void ipw_bg_abort_scan(void *data)
6312 struct ipw_priv *priv = data;
6313 mutex_lock(&priv->mutex);
6314 ipw_abort_scan(data);
6315 mutex_unlock(&priv->mutex);
6318 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6320 /* This is called when wpa_supplicant loads and closes the driver
6321 * interface. */
6322 priv->ieee->wpa_enabled = value;
6323 return 0;
6326 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6328 struct ieee80211_device *ieee = priv->ieee;
6329 struct ieee80211_security sec = {
6330 .flags = SEC_AUTH_MODE,
6332 int ret = 0;
6334 if (value & IW_AUTH_ALG_SHARED_KEY) {
6335 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6336 ieee->open_wep = 0;
6337 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6338 sec.auth_mode = WLAN_AUTH_OPEN;
6339 ieee->open_wep = 1;
6340 } else if (value & IW_AUTH_ALG_LEAP) {
6341 sec.auth_mode = WLAN_AUTH_LEAP;
6342 ieee->open_wep = 1;
6343 } else
6344 return -EINVAL;
6346 if (ieee->set_security)
6347 ieee->set_security(ieee->dev, &sec);
6348 else
6349 ret = -EOPNOTSUPP;
6351 return ret;
6354 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6355 int wpa_ie_len)
6357 /* make sure WPA is enabled */
6358 ipw_wpa_enable(priv, 1);
6361 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6362 char *capabilities, int length)
6364 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6366 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6367 capabilities);
6371 * WE-18 support
6374 /* SIOCSIWGENIE */
6375 static int ipw_wx_set_genie(struct net_device *dev,
6376 struct iw_request_info *info,
6377 union iwreq_data *wrqu, char *extra)
6379 struct ipw_priv *priv = ieee80211_priv(dev);
6380 struct ieee80211_device *ieee = priv->ieee;
6381 u8 *buf;
6382 int err = 0;
6384 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6385 (wrqu->data.length && extra == NULL))
6386 return -EINVAL;
6388 if (wrqu->data.length) {
6389 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6390 if (buf == NULL) {
6391 err = -ENOMEM;
6392 goto out;
6395 memcpy(buf, extra, wrqu->data.length);
6396 kfree(ieee->wpa_ie);
6397 ieee->wpa_ie = buf;
6398 ieee->wpa_ie_len = wrqu->data.length;
6399 } else {
6400 kfree(ieee->wpa_ie);
6401 ieee->wpa_ie = NULL;
6402 ieee->wpa_ie_len = 0;
6405 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6406 out:
6407 return err;
6410 /* SIOCGIWGENIE */
6411 static int ipw_wx_get_genie(struct net_device *dev,
6412 struct iw_request_info *info,
6413 union iwreq_data *wrqu, char *extra)
6415 struct ipw_priv *priv = ieee80211_priv(dev);
6416 struct ieee80211_device *ieee = priv->ieee;
6417 int err = 0;
6419 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6420 wrqu->data.length = 0;
6421 goto out;
6424 if (wrqu->data.length < ieee->wpa_ie_len) {
6425 err = -E2BIG;
6426 goto out;
6429 wrqu->data.length = ieee->wpa_ie_len;
6430 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6432 out:
6433 return err;
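/* Map a WEXT IW_AUTH_CIPHER_* value onto the driver's SEC_LEVEL_*
 * scale: none -> 0, WEP40/104 -> 1, TKIP -> 2, CCMP -> 3, -1 otherwise. */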
6436 static int wext_cipher2level(int cipher)
6438 switch (cipher) {
6439 case IW_AUTH_CIPHER_NONE:
6440 return SEC_LEVEL_0;
6441 case IW_AUTH_CIPHER_WEP40:
6442 case IW_AUTH_CIPHER_WEP104:
6443 return SEC_LEVEL_1;
6444 case IW_AUTH_CIPHER_TKIP:
6445 return SEC_LEVEL_2;
6446 case IW_AUTH_CIPHER_CCMP:
6447 return SEC_LEVEL_3;
6448 default:
6449 return -1;
6453 /* SIOCSIWAUTH */
6454 static int ipw_wx_set_auth(struct net_device *dev,
6455 struct iw_request_info *info,
6456 union iwreq_data *wrqu, char *extra)
6458 struct ipw_priv *priv = ieee80211_priv(dev);
6459 struct ieee80211_device *ieee = priv->ieee;
6460 struct iw_param *param = &wrqu->param;
6461 struct ieee80211_crypt_data *crypt;
6462 unsigned long flags;
6463 int ret = 0;
6465 switch (param->flags & IW_AUTH_INDEX) {
6466 case IW_AUTH_WPA_VERSION:
6467 break;
6468 case IW_AUTH_CIPHER_PAIRWISE:
6469 ipw_set_hw_decrypt_unicast(priv,
6470 wext_cipher2level(param->value));
6471 break;
6472 case IW_AUTH_CIPHER_GROUP:
6473 ipw_set_hw_decrypt_multicast(priv,
6474 wext_cipher2level(param->value));
6475 break;
6476 case IW_AUTH_KEY_MGMT:
6478 * ipw2200 does not use these parameters
6480 break;
6482 case IW_AUTH_TKIP_COUNTERMEASURES:
6483 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6484 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6485 break;
6487 flags = crypt->ops->get_flags(crypt->priv);
6489 if (param->value)
6490 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6491 else
6492 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6494 crypt->ops->set_flags(flags, crypt->priv);
6496 break;
6498 case IW_AUTH_DROP_UNENCRYPTED:{
6499 /* HACK:
6501 * wpa_supplicant calls set_wpa_enabled when the driver
6502 * is loaded and unloaded, regardless of whether WPA is being
6503 * used. No other calls are made which can be used to
6504 * determine if encryption will be used or not prior to
6505 * association being expected. If encryption is not being
6506 * used, drop_unencrypted is set to false, else true -- we
6507 * can use this to determine if the CAP_PRIVACY_ON bit should
6508 * be set.
6510 struct ieee80211_security sec = {
6511 .flags = SEC_ENABLED,
6512 .enabled = param->value,
6514 priv->ieee->drop_unencrypted = param->value;
6515 /* We only change SEC_LEVEL for open mode. Others
6516 * are set by ipw_wpa_set_encryption.
6518 if (!param->value) {
6519 sec.flags |= SEC_LEVEL;
6520 sec.level = SEC_LEVEL_0;
6521 } else {
6522 sec.flags |= SEC_LEVEL;
6523 sec.level = SEC_LEVEL_1;
6525 if (priv->ieee->set_security)
6526 priv->ieee->set_security(priv->ieee->dev, &sec);
6527 break;
6530 case IW_AUTH_80211_AUTH_ALG:
6531 ret = ipw_wpa_set_auth_algs(priv, param->value);
6532 break;
6534 case IW_AUTH_WPA_ENABLED:
6535 ret = ipw_wpa_enable(priv, param->value);
6536 ipw_disassociate(priv);
6537 break;
6539 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6540 ieee->ieee802_1x = param->value;
6541 break;
6543 case IW_AUTH_PRIVACY_INVOKED:
6544 ieee->privacy_invoked = param->value;
6545 break;
6547 default:
6548 return -EOPNOTSUPP;
6550 return ret;
6553 /* SIOCGIWAUTH */
6554 static int ipw_wx_get_auth(struct net_device *dev,
6555 struct iw_request_info *info,
6556 union iwreq_data *wrqu, char *extra)
6558 struct ipw_priv *priv = ieee80211_priv(dev);
6559 struct ieee80211_device *ieee = priv->ieee;
6560 struct ieee80211_crypt_data *crypt;
6561 struct iw_param *param = &wrqu->param;
6562 int ret = 0;
6564 switch (param->flags & IW_AUTH_INDEX) {
6565 case IW_AUTH_WPA_VERSION:
6566 case IW_AUTH_CIPHER_PAIRWISE:
6567 case IW_AUTH_CIPHER_GROUP:
6568 case IW_AUTH_KEY_MGMT:
6570 * wpa_supplicant will control these internally
6572 ret = -EOPNOTSUPP;
6573 break;
6575 case IW_AUTH_TKIP_COUNTERMEASURES:
6576 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6577 if (!crypt || !crypt->ops->get_flags)
6578 break;
6580 param->value = (crypt->ops->get_flags(crypt->priv) &
6581 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6583 break;
6585 case IW_AUTH_DROP_UNENCRYPTED:
6586 param->value = ieee->drop_unencrypted;
6587 break;
6589 case IW_AUTH_80211_AUTH_ALG:
6590 param->value = ieee->sec.auth_mode;
6591 break;
6593 case IW_AUTH_WPA_ENABLED:
6594 param->value = ieee->wpa_enabled;
6595 break;
6597 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6598 param->value = ieee->ieee802_1x;
6599 break;
6601 case IW_AUTH_ROAMING_CONTROL:
6602 case IW_AUTH_PRIVACY_INVOKED:
6603 param->value = ieee->privacy_invoked;
6604 break;
6606 default:
6607 return -EOPNOTSUPP;
6609 return 0;
6612 /* SIOCSIWENCODEEXT */
6613 static int ipw_wx_set_encodeext(struct net_device *dev,
6614 struct iw_request_info *info,
6615 union iwreq_data *wrqu, char *extra)
6617 struct ipw_priv *priv = ieee80211_priv(dev);
6618 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6620 if (hwcrypto) {
6621 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6622 /* IPW HW can't build TKIP MIC,
6623 host decryption still needed */
6624 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6625 priv->ieee->host_mc_decrypt = 1;
6626 else {
6627 priv->ieee->host_encrypt = 0;
6628 priv->ieee->host_encrypt_msdu = 1;
6629 priv->ieee->host_decrypt = 1;
6631 } else {
6632 priv->ieee->host_encrypt = 0;
6633 priv->ieee->host_encrypt_msdu = 0;
6634 priv->ieee->host_decrypt = 0;
6635 priv->ieee->host_mc_decrypt = 0;
6639 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6642 /* SIOCGIWENCODEEXT */
6643 static int ipw_wx_get_encodeext(struct net_device *dev,
6644 struct iw_request_info *info,
6645 union iwreq_data *wrqu, char *extra)
6647 struct ipw_priv *priv = ieee80211_priv(dev);
6648 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6651 /* SIOCSIWMLME */
6652 static int ipw_wx_set_mlme(struct net_device *dev,
6653 struct iw_request_info *info,
6654 union iwreq_data *wrqu, char *extra)
6656 struct ipw_priv *priv = ieee80211_priv(dev);
6657 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6658 u16 reason;
6660 reason = cpu_to_le16(mlme->reason_code);
6662 switch (mlme->cmd) {
6663 case IW_MLME_DEAUTH:
6664 /* silently ignore */
6665 break;
6667 case IW_MLME_DISASSOC:
6668 ipw_disassociate(priv);
6669 break;
6671 default:
6672 return -EOPNOTSUPP;
6674 return 0;
6677 #ifdef CONFIG_IPW2200_QOS
6679 /* QoS */
6681 * Get the modulation type of the current network or
6682 * the card's current mode
6684 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6686 u8 mode = 0;
6688 if (priv->status & STATUS_ASSOCIATED) {
6689 unsigned long flags;
6691 spin_lock_irqsave(&priv->ieee->lock, flags);
6692 mode = priv->assoc_network->mode;
6693 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6694 } else {
6695 mode = priv->ieee->mode;
6697 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6698 return mode;
6702 * Handle beacon and probe response management frames
6704 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6705 int active_network,
6706 struct ieee80211_network *network)
6708 u32 size = sizeof(struct ieee80211_qos_parameters);
6710 if (network->capability & WLAN_CAPABILITY_IBSS)
6711 network->qos_data.active = network->qos_data.supported;
6713 if (network->flags & NETWORK_HAS_QOS_MASK) {
6714 if (active_network &&
6715 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6716 network->qos_data.active = network->qos_data.supported;
6718 if ((network->qos_data.active == 1) && (active_network == 1) &&
6719 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6720 (network->qos_data.old_param_count !=
6721 network->qos_data.param_count)) {
6722 network->qos_data.old_param_count =
6723 network->qos_data.param_count;
6724 schedule_work(&priv->qos_activate);
6725 IPW_DEBUG_QOS("QoS parameters changed, calling "
6726 "qos_activate\n");
6728 } else {
6729 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6730 memcpy(&network->qos_data.parameters,
6731 &def_parameters_CCK, size);
6732 else
6733 memcpy(&network->qos_data.parameters,
6734 &def_parameters_OFDM, size);
6736 if ((network->qos_data.active == 1) && (active_network == 1)) {
6737 IPW_DEBUG_QOS("QoS was disabled; calling qos_activate\n");
6738 schedule_work(&priv->qos_activate);
6741 network->qos_data.active = 0;
6742 network->qos_data.supported = 0;
6744 if ((priv->status & STATUS_ASSOCIATED) &&
6745 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6746 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6747 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6748 !(network->flags & NETWORK_EMPTY_ESSID))
6749 if ((network->ssid_len ==
6750 priv->assoc_network->ssid_len) &&
6751 !memcmp(network->ssid,
6752 priv->assoc_network->ssid,
6753 network->ssid_len)) {
6754 queue_work(priv->workqueue,
6755 &priv->merge_networks);
6759 return 0;
6763 * This function sets up the firmware to support QoS. It sends
6764 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6766 static int ipw_qos_activate(struct ipw_priv *priv,
6767 struct ieee80211_qos_data *qos_network_data)
6769 int err;
6770 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6771 struct ieee80211_qos_parameters *active_one = NULL;
6772 u32 size = sizeof(struct ieee80211_qos_parameters);
6773 u32 burst_duration;
6774 int i;
6775 u8 type;
6777 type = ipw_qos_current_mode(priv);
6779 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6780 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6781 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6782 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6784 if (qos_network_data == NULL) {
6785 if (type == IEEE_B) {
6786 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6787 active_one = &def_parameters_CCK;
6788 } else
6789 active_one = &def_parameters_OFDM;
6791 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6792 burst_duration = ipw_qos_get_burst_duration(priv);
6793 for (i = 0; i < QOS_QUEUE_NUM; i++)
6794 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6795 (u16)burst_duration;
6796 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6797 if (type == IEEE_B) {
6798 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6799 type);
6800 if (priv->qos_data.qos_enable == 0)
6801 active_one = &def_parameters_CCK;
6802 else
6803 active_one = priv->qos_data.def_qos_parm_CCK;
6804 } else {
6805 if (priv->qos_data.qos_enable == 0)
6806 active_one = &def_parameters_OFDM;
6807 else
6808 active_one = priv->qos_data.def_qos_parm_OFDM;
6810 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6811 } else {
6812 unsigned long flags;
6813 int active;
6815 spin_lock_irqsave(&priv->ieee->lock, flags);
6816 active_one = &(qos_network_data->parameters);
6817 qos_network_data->old_param_count =
6818 qos_network_data->param_count;
6819 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6820 active = qos_network_data->supported;
6821 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6823 if (active == 0) {
6824 burst_duration = ipw_qos_get_burst_duration(priv);
6825 for (i = 0; i < QOS_QUEUE_NUM; i++)
6826 qos_parameters[QOS_PARAM_SET_ACTIVE].
6827 tx_op_limit[i] = (u16)burst_duration;
6831 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6832 for (i = 0; i < 3; i++) {
6833 int j;
6834 for (j = 0; j < QOS_QUEUE_NUM; j++) {
6835 qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6836 qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6837 qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6841 err = ipw_send_qos_params_command(priv,
6842 (struct ieee80211_qos_parameters *)
6843 &(qos_parameters[0]));
6844 if (err)
6845 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6847 return err;
6851 * send IPW_CMD_WME_INFO to the firmware
6853 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6855 int ret = 0;
6856 struct ieee80211_qos_information_element qos_info;
6858 if (priv == NULL)
6859 return -1;
6861 qos_info.elementID = QOS_ELEMENT_ID;
6862 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6864 qos_info.version = QOS_VERSION_1;
6865 qos_info.ac_info = 0;
6867 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6868 qos_info.qui_type = QOS_OUI_TYPE;
6869 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6871 ret = ipw_send_qos_info_command(priv, &qos_info);
6872 if (ret != 0) {
6873 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6875 return ret;
6879 * Set the QoS parameter with the association request structure
6881 static int ipw_qos_association(struct ipw_priv *priv,
6882 struct ieee80211_network *network)
6884 int err = 0;
6885 struct ieee80211_qos_data *qos_data = NULL;
6886 struct ieee80211_qos_data ibss_data = {
6887 .supported = 1,
6888 .active = 1,
6891 switch (priv->ieee->iw_mode) {
6892 case IW_MODE_ADHOC:
6893 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6895 qos_data = &ibss_data;
6896 break;
6898 case IW_MODE_INFRA:
6899 qos_data = &network->qos_data;
6900 break;
6902 default:
6903 BUG();
6904 break;
6907 err = ipw_qos_activate(priv, qos_data);
6908 if (err) {
6909 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6910 return err;
6913 if (priv->qos_data.qos_enable && qos_data->supported) {
6914 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6915 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6916 return ipw_qos_set_info_element(priv);
6919 return 0;
6923 * Handle the beacon responses: if the QoS settings of the network
6924 * differ from the settings used at association, adjust the QoS
6925 * settings
6927 static int ipw_qos_association_resp(struct ipw_priv *priv,
6928 struct ieee80211_network *network)
6930 int ret = 0;
6931 unsigned long flags;
6932 u32 size = sizeof(struct ieee80211_qos_parameters);
6933 int set_qos_param = 0;
6935 if ((priv == NULL) || (network == NULL) ||
6936 (priv->assoc_network == NULL))
6937 return ret;
6939 if (!(priv->status & STATUS_ASSOCIATED))
6940 return ret;
6942 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6943 return ret;
6945 spin_lock_irqsave(&priv->ieee->lock, flags);
6946 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6947 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6948 sizeof(struct ieee80211_qos_data));
6949 priv->assoc_network->qos_data.active = 1;
6950 if ((network->qos_data.old_param_count !=
6951 network->qos_data.param_count)) {
6952 set_qos_param = 1;
6953 network->qos_data.old_param_count =
6954 network->qos_data.param_count;
6957 } else {
6958 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6959 memcpy(&priv->assoc_network->qos_data.parameters,
6960 &def_parameters_CCK, size);
6961 else
6962 memcpy(&priv->assoc_network->qos_data.parameters,
6963 &def_parameters_OFDM, size);
6964 priv->assoc_network->qos_data.active = 0;
6965 priv->assoc_network->qos_data.supported = 0;
6966 set_qos_param = 1;
6969 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6971 if (set_qos_param == 1)
6972 schedule_work(&priv->qos_activate);
6974 return ret;
6977 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6979 u32 ret = 0;
6981 if ((priv == NULL))
6982 return 0;
6984 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6985 ret = priv->qos_data.burst_duration_CCK;
6986 else
6987 ret = priv->qos_data.burst_duration_OFDM;
6989 return ret;
6993 * Initialize the global QoS settings
6995 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6996 int burst_enable, u32 burst_duration_CCK,
6997 u32 burst_duration_OFDM)
6999 priv->qos_data.qos_enable = enable;
7001 if (priv->qos_data.qos_enable) {
7002 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7003 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7004 IPW_DEBUG_QOS("QoS is enabled\n");
7005 } else {
7006 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7007 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7008 IPW_DEBUG_QOS("QoS is not enabled\n");
7011 priv->qos_data.burst_enable = burst_enable;
7013 if (burst_enable) {
7014 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7015 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7016 } else {
7017 priv->qos_data.burst_duration_CCK = 0;
7018 priv->qos_data.burst_duration_OFDM = 0;
7023 * map the packet priority to the right TX Queue
7025 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7027 if (priority > 7 || !priv->qos_data.qos_enable)
7028 priority = 0;
7030 return from_priority_to_tx_queue[priority] - 1;
7033 static int ipw_is_qos_active(struct net_device *dev,
7034 struct sk_buff *skb)
7036 struct ipw_priv *priv = ieee80211_priv(dev);
7037 struct ieee80211_qos_data *qos_data = NULL;
7038 int active, supported;
7039 u8 *daddr = skb->data + ETH_ALEN;
7040 int unicast = !is_multicast_ether_addr(daddr);
7042 if (!(priv->status & STATUS_ASSOCIATED))
7043 return 0;
7045 qos_data = &priv->assoc_network->qos_data;
7047 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7048 if (unicast == 0)
7049 qos_data->active = 0;
7050 else
7051 qos_data->active = qos_data->supported;
7053 active = qos_data->active;
7054 supported = qos_data->supported;
7055 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7056 "unicast %d\n",
7057 priv->qos_data.qos_enable, active, supported, unicast);
7058 if (active && priv->qos_data.qos_enable)
7059 return 1;
7061 return 0;
7065 * add QoS parameter to the TX command
7067 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7068 u16 priority,
7069 struct tfd_data *tfd)
7071 int tx_queue_id = 0;
7074 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7075 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7077 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7078 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7079 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7081 return 0;
7085 * background support to run QoS activate functionality
7087 static void ipw_bg_qos_activate(void *data)
7089 struct ipw_priv *priv = data;
7091 if (priv == NULL)
7092 return;
7094 mutex_lock(&priv->mutex);
7096 if (priv->status & STATUS_ASSOCIATED)
7097 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7099 mutex_unlock(&priv->mutex);
7102 static int ipw_handle_probe_response(struct net_device *dev,
7103 struct ieee80211_probe_response *resp,
7104 struct ieee80211_network *network)
7106 struct ipw_priv *priv = ieee80211_priv(dev);
7107 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7108 (network == priv->assoc_network));
7110 ipw_qos_handle_probe_response(priv, active_network, network);
7112 return 0;
7115 static int ipw_handle_beacon(struct net_device *dev,
7116 struct ieee80211_beacon *resp,
7117 struct ieee80211_network *network)
7119 struct ipw_priv *priv = ieee80211_priv(dev);
7120 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7121 (network == priv->assoc_network));
7123 ipw_qos_handle_probe_response(priv, active_network, network);
7125 return 0;
7128 static int ipw_handle_assoc_response(struct net_device *dev,
7129 struct ieee80211_assoc_response *resp,
7130 struct ieee80211_network *network)
7132 struct ipw_priv *priv = ieee80211_priv(dev);
7133 ipw_qos_association_resp(priv, network);
7134 return 0;
7137 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7138 *qos_param)
7140 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7141 sizeof(*qos_param) * 3, qos_param);
7144 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7145 *qos_param)
7147 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7148 qos_param);
7151 #endif /* CONFIG_IPW2200_QOS */
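/* Build and send the association request for the selected network:
 * program the fixed rate and ESSID, fill priv->assoc_request (channel,
 * auth type, ieee mode, capability, preamble, beacon/ATIM/listen
 * intervals, BSSID), send the SSID, supported-rates, system-config and
 * sensitivity commands, and finally issue HC_ASSOCIATE, HC_REASSOCIATE
 * or HC_IBSS_START via ipw_send_associate. */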
7153 static int ipw_associate_network(struct ipw_priv *priv,
7154 struct ieee80211_network *network,
7155 struct ipw_supported_rates *rates, int roaming)
7157 int err;
7159 if (priv->config & CFG_FIXED_RATE)
7160 ipw_set_fixed_rate(priv, network->mode);
7162 if (!(priv->config & CFG_STATIC_ESSID)) {
7163 priv->essid_len = min(network->ssid_len,
7164 (u8) IW_ESSID_MAX_SIZE);
7165 memcpy(priv->essid, network->ssid, priv->essid_len);
7168 network->last_associate = jiffies;
7170 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7171 priv->assoc_request.channel = network->channel;
7172 priv->assoc_request.auth_key = 0;
7174 if ((priv->capability & CAP_PRIVACY_ON) &&
7175 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7176 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7177 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7179 if (priv->ieee->sec.level == SEC_LEVEL_1)
7180 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7182 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7183 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7184 priv->assoc_request.auth_type = AUTH_LEAP;
7185 else
7186 priv->assoc_request.auth_type = AUTH_OPEN;
7188 if (priv->ieee->wpa_ie_len) {
7189 priv->assoc_request.policy_support = 0x02; /* RSN active */
7190 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7191 priv->ieee->wpa_ie_len);
7195 * It is valid for our ieee device to support multiple modes, but
7196 * when it comes to associating to a given network we have to choose
7197 * just one mode.
7199 if (network->mode & priv->ieee->mode & IEEE_A)
7200 priv->assoc_request.ieee_mode = IPW_A_MODE;
7201 else if (network->mode & priv->ieee->mode & IEEE_G)
7202 priv->assoc_request.ieee_mode = IPW_G_MODE;
7203 else if (network->mode & priv->ieee->mode & IEEE_B)
7204 priv->assoc_request.ieee_mode = IPW_B_MODE;
7206 priv->assoc_request.capability = network->capability;
7207 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7208 && !(priv->config & CFG_PREAMBLE_LONG)) {
7209 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7210 } else {
7211 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7213 /* Clear the short preamble if we won't be supporting it */
7214 priv->assoc_request.capability &=
7215 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7218 /* Clear capability bits that aren't used in Ad Hoc */
7219 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7220 priv->assoc_request.capability &=
7221 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7223 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7224 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7225 roaming ? "Rea" : "A",
7226 escape_essid(priv->essid, priv->essid_len),
7227 network->channel,
7228 ipw_modes[priv->assoc_request.ieee_mode],
7229 rates->num_rates,
7230 (priv->assoc_request.preamble_length ==
7231 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7232 network->capability &
7233 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7234 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7235 priv->capability & CAP_PRIVACY_ON ?
7236 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7237 "(open)") : "",
7238 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7239 priv->capability & CAP_PRIVACY_ON ?
7240 '1' + priv->ieee->sec.active_key : '.',
7241 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7243 priv->assoc_request.beacon_interval = network->beacon_interval;
7244 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7245 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7246 priv->assoc_request.assoc_type = HC_IBSS_START;
7247 priv->assoc_request.assoc_tsf_msw = 0;
7248 priv->assoc_request.assoc_tsf_lsw = 0;
7249 } else {
7250 if (unlikely(roaming))
7251 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7252 else
7253 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7254 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7255 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7258 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7260 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7261 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7262 priv->assoc_request.atim_window = network->atim_window;
7263 } else {
7264 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7265 priv->assoc_request.atim_window = 0;
7268 priv->assoc_request.listen_interval = network->listen_interval;
7270 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7271 if (err) {
7272 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7273 return err;
7276 rates->ieee_mode = priv->assoc_request.ieee_mode;
7277 rates->purpose = IPW_RATE_CONNECT;
7278 ipw_send_supported_rates(priv, rates);
7280 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7281 priv->sys_config.dot11g_auto_detection = 1;
7282 else
7283 priv->sys_config.dot11g_auto_detection = 0;
7285 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7286 priv->sys_config.answer_broadcast_ssid_probe = 1;
7287 else
7288 priv->sys_config.answer_broadcast_ssid_probe = 0;
7290 err = ipw_send_system_config(priv);
7291 if (err) {
7292 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7293 return err;
7296 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7297 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7298 if (err) {
7299 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7300 return err;
7304 * If preemption is enabled, it is possible for the association
7305 * to complete before we return from ipw_send_associate. Therefore
7306 * we have to be sure to update our private data first.
7308 priv->channel = network->channel;
7309 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7310 priv->status |= STATUS_ASSOCIATING;
7311 priv->status &= ~STATUS_SECURITY_UPDATED;
7313 priv->assoc_network = network;
7315 #ifdef CONFIG_IPW2200_QOS
7316 ipw_qos_association(priv, network);
7317 #endif
7319 err = ipw_send_associate(priv, &priv->assoc_request);
7320 if (err) {
7321 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7322 return err;
7325 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7326 escape_essid(priv->essid, priv->essid_len),
7327 MAC_ARG(priv->bssid));
7329 return 0;
7332 static void ipw_roam(void *data)
7334 struct ipw_priv *priv = data;
7335 struct ieee80211_network *network = NULL;
7336 struct ipw_network_match match = {
7337 .network = priv->assoc_network
7340 /* The roaming process is as follows:
7342 * 1. Missed beacon threshold triggers the roaming process by
7343 * setting the status ROAM bit and requesting a scan.
7344 * 2. When the scan completes, it schedules the ROAM work
7345 * 3. The ROAM work looks at all of the known networks for one that
7346 * is a better network than the currently associated. If none
7347 * found, the ROAM process is over (ROAM bit cleared)
7348 * 4. If a better network is found, a disassociation request is
7349 * sent.
7350 * 5. When the disassociation completes, the roam work is again
7351 * scheduled. The second time through, the driver is no longer
7352 * associated, and the newly selected network is sent an
7353 * association request.
7354 * 6. At this point, the roaming process is complete and the ROAM
7355 * status bit is cleared.
7358 /* If we are no longer associated, and the roaming bit is no longer
7359 * set, then we are not actively roaming, so just return */
7360 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7361 return;
7363 if (priv->status & STATUS_ASSOCIATED) {
7364 /* First pass through ROAM process -- look for a better
7365 * network */
7366 unsigned long flags;
7367 u8 rssi = priv->assoc_network->stats.rssi;
7368 priv->assoc_network->stats.rssi = -128;
7369 spin_lock_irqsave(&priv->ieee->lock, flags);
7370 list_for_each_entry(network, &priv->ieee->network_list, list) {
7371 if (network != priv->assoc_network)
7372 ipw_best_network(priv, &match, network, 1);
7374 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7375 priv->assoc_network->stats.rssi = rssi;
7377 if (match.network == priv->assoc_network) {
7378 IPW_DEBUG_ASSOC("No better APs in this network to "
7379 "roam to.\n");
7380 priv->status &= ~STATUS_ROAMING;
7381 ipw_debug_config(priv);
7382 return;
7385 ipw_send_disassociate(priv, 1);
7386 priv->assoc_network = match.network;
7388 return;
7391 /* Second pass through ROAM process -- request association */
7392 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7393 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7394 priv->status &= ~STATUS_ROAMING;
7397 static void ipw_bg_roam(void *data)
7399 struct ipw_priv *priv = data;
7400 mutex_lock(&priv->mutex);
7401 ipw_roam(data);
7402 mutex_unlock(&priv->mutex);
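/* Association worker: picks the best matching network from the
 * ieee80211 network list (or creates an ad-hoc network when allowed and
 * statically configured) and hands it to ipw_associate_network.  Does
 * nothing in monitor mode, while already associating/associated,
 * disassociating or scanning, or when no association criteria are set.
 * Returns 1 if an association attempt was started, 0 otherwise. */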
7405 static int ipw_associate(void *data)
7407 struct ipw_priv *priv = data;
7409 struct ieee80211_network *network = NULL;
7410 struct ipw_network_match match = {
7411 .network = NULL
7413 struct ipw_supported_rates *rates;
7414 struct list_head *element;
7415 unsigned long flags;
7417 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7418 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7419 return 0;
7422 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7423 IPW_DEBUG_ASSOC("Not attempting association (already in "
7424 "progress)\n");
7425 return 0;
7428 if (priv->status & STATUS_DISASSOCIATING) {
7429 IPW_DEBUG_ASSOC("Not attempting association (in "
7430 "disassociating)\n ");
7431 queue_work(priv->workqueue, &priv->associate);
7432 return 0;
7435 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7436 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7437 "initialized)\n");
7438 return 0;
7441 if (!(priv->config & CFG_ASSOCIATE) &&
7442 !(priv->config & (CFG_STATIC_ESSID |
7443 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7444 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7445 return 0;
7448 /* Protect our use of the network_list */
7449 spin_lock_irqsave(&priv->ieee->lock, flags);
7450 list_for_each_entry(network, &priv->ieee->network_list, list)
7451 ipw_best_network(priv, &match, network, 0);
7453 network = match.network;
7454 rates = &match.rates;
7456 if (network == NULL &&
7457 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7458 priv->config & CFG_ADHOC_CREATE &&
7459 priv->config & CFG_STATIC_ESSID &&
7460 priv->config & CFG_STATIC_CHANNEL &&
7461 !list_empty(&priv->ieee->network_free_list)) {
7462 element = priv->ieee->network_free_list.next;
7463 network = list_entry(element, struct ieee80211_network, list);
7464 ipw_adhoc_create(priv, network);
7465 rates = &priv->rates;
7466 list_del(element);
7467 list_add_tail(&network->list, &priv->ieee->network_list);
7469 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7471 /* If we reached the end of the list, then we don't have any valid
7472 * matching APs */
7473 if (!network) {
7474 ipw_debug_config(priv);
7476 if (!(priv->status & STATUS_SCANNING)) {
7477 if (!(priv->config & CFG_SPEED_SCAN))
7478 queue_delayed_work(priv->workqueue,
7479 &priv->request_scan,
7480 SCAN_INTERVAL);
7481 else
7482 queue_work(priv->workqueue,
7483 &priv->request_scan);
7486 return 0;
7489 ipw_associate_network(priv, network, rates, 0);
7491 return 1;
7494 static void ipw_bg_associate(void *data)
7496 struct ipw_priv *priv = data;
7497 mutex_lock(&priv->mutex);
7498 ipw_associate(data);
7499 mutex_unlock(&priv->mutex);
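/* The firmware has already decrypted this frame but leaves the
 * PROTECTED bit and the WEP/CCMP header and trailer in place.  Clear
 * the bit and strip the crypto overhead so the frame looks like
 * plaintext to the ieee80211 stack. */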
7502 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7503 struct sk_buff *skb)
7505 struct ieee80211_hdr *hdr;
7506 u16 fc;
7508 hdr = (struct ieee80211_hdr *)skb->data;
7509 fc = le16_to_cpu(hdr->frame_ctl);
7510 if (!(fc & IEEE80211_FCTL_PROTECTED))
7511 return;
7513 fc &= ~IEEE80211_FCTL_PROTECTED;
7514 hdr->frame_ctl = cpu_to_le16(fc);
7515 switch (priv->ieee->sec.level) {
7516 case SEC_LEVEL_3:
7517 /* Remove CCMP HDR */
7518 memmove(skb->data + IEEE80211_3ADDR_LEN,
7519 skb->data + IEEE80211_3ADDR_LEN + 8,
7520 skb->len - IEEE80211_3ADDR_LEN - 8);
7521 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7522 break;
7523 case SEC_LEVEL_2:
7524 break;
7525 case SEC_LEVEL_1:
7526 /* Remove IV */
7527 memmove(skb->data + IEEE80211_3ADDR_LEN,
7528 skb->data + IEEE80211_3ADDR_LEN + 4,
7529 skb->len - IEEE80211_3ADDR_LEN - 4);
7530 skb_trim(skb, skb->len - 8); /* IV + ICV */
7531 break;
7532 case SEC_LEVEL_0:
7533 break;
7534 default:
7535 printk(KERN_ERR "Unknown security level %d\n",
7536 priv->ieee->sec.level);
7537 break;
7541 static void ipw_handle_data_packet(struct ipw_priv *priv,
7542 struct ipw_rx_mem_buffer *rxb,
7543 struct ieee80211_rx_stats *stats)
7545 struct ieee80211_hdr_4addr *hdr;
7546 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7548 /* We received data from the HW, so stop the watchdog */
7549 priv->net_dev->trans_start = jiffies;
7551 /* We only process data packets if the
7552 * interface is open */
7553 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7554 skb_tailroom(rxb->skb))) {
7555 priv->ieee->stats.rx_errors++;
7556 priv->wstats.discard.misc++;
7557 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7558 return;
7559 } else if (unlikely(!netif_running(priv->net_dev))) {
7560 priv->ieee->stats.rx_dropped++;
7561 priv->wstats.discard.misc++;
7562 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7563 return;
7566 /* Advance skb->data to the start of the actual payload */
7567 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7569 /* Set the size of the skb to the size of the frame */
7570 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7572 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7574 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7575 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7576 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7577 (is_multicast_ether_addr(hdr->addr1) ?
7578 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7579 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7581 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7582 priv->ieee->stats.rx_errors++;
7583 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7584 rxb->skb = NULL;
7585 __ipw_led_activity_on(priv);
7589 #ifdef CONFIG_IPW2200_RADIOTAP
7590 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7591 struct ipw_rx_mem_buffer *rxb,
7592 struct ieee80211_rx_stats *stats)
7594 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7595 struct ipw_rx_frame *frame = &pkt->u.frame;
7597 /* initial pull of some data */
7598 u16 received_channel = frame->received_channel;
7599 u8 antennaAndPhy = frame->antennaAndPhy;
7600 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7601 u16 pktrate = frame->rate;
7603 /* Magic struct that slots into the radiotap header -- no reason
7604 * to build this manually element by element, we can write it much
7605 * more efficiently than we can parse it. ORDER MATTERS HERE */
7606 struct ipw_rt_hdr *ipw_rt;
7608 short len = le16_to_cpu(pkt->u.frame.length);
7610 /* We received data from the HW, so stop the watchdog */
7611 priv->net_dev->trans_start = jiffies;
7613 /* We only process data packets if the
7614 * interface is open */
7615 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7616 skb_tailroom(rxb->skb))) {
7617 priv->ieee->stats.rx_errors++;
7618 priv->wstats.discard.misc++;
7619 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7620 return;
7621 } else if (unlikely(!netif_running(priv->net_dev))) {
7622 priv->ieee->stats.rx_dropped++;
7623 priv->wstats.discard.misc++;
7624 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7625 return;
7628 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7629 * that now */
7630 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7631 /* FIXME: Should alloc bigger skb instead */
7632 priv->ieee->stats.rx_dropped++;
7633 priv->wstats.discard.misc++;
7634 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7635 return;
7638 /* copy the frame itself */
7639 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7640 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7642 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7643 * part of our real header, saves a little time.
7645 * No longer necessary since we fill in all our data. Purge before merging
7646 * patch officially.
7647 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7648 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7651 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7653 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7654 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7655 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7657 /* Big bitfield of all the fields we provide in radiotap */
7658 ipw_rt->rt_hdr.it_present =
7659 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7660 (1 << IEEE80211_RADIOTAP_RATE) |
7661 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7662 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7663 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7664 (1 << IEEE80211_RADIOTAP_ANTENNA));
7666 /* Zero the flags, we'll add to them as we go */
7667 ipw_rt->rt_flags = 0;
7668 ipw_rt->rt_tsf = 0ULL;
7670 /* Convert signal to DBM */
7671 ipw_rt->rt_dbmsignal = antsignal;
7673 /* Convert the channel data and set the flags */
7674 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7675 if (received_channel > 14) { /* 802.11a */
7676 ipw_rt->rt_chbitmask =
7677 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7678 } else if (antennaAndPhy & 32) { /* 802.11b */
7679 ipw_rt->rt_chbitmask =
7680 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7681 } else { /* 802.11g */
7682 ipw_rt->rt_chbitmask =
7683 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7686 /* set the rate in multiples of 500k/s */
7687 switch (pktrate) {
7688 case IPW_TX_RATE_1MB:
7689 ipw_rt->rt_rate = 2;
7690 break;
7691 case IPW_TX_RATE_2MB:
7692 ipw_rt->rt_rate = 4;
7693 break;
7694 case IPW_TX_RATE_5MB:
7695 ipw_rt->rt_rate = 10;
7696 break;
7697 case IPW_TX_RATE_6MB:
7698 ipw_rt->rt_rate = 12;
7699 break;
7700 case IPW_TX_RATE_9MB:
7701 ipw_rt->rt_rate = 18;
7702 break;
7703 case IPW_TX_RATE_11MB:
7704 ipw_rt->rt_rate = 22;
7705 break;
7706 case IPW_TX_RATE_12MB:
7707 ipw_rt->rt_rate = 24;
7708 break;
7709 case IPW_TX_RATE_18MB:
7710 ipw_rt->rt_rate = 36;
7711 break;
7712 case IPW_TX_RATE_24MB:
7713 ipw_rt->rt_rate = 48;
7714 break;
7715 case IPW_TX_RATE_36MB:
7716 ipw_rt->rt_rate = 72;
7717 break;
7718 case IPW_TX_RATE_48MB:
7719 ipw_rt->rt_rate = 96;
7720 break;
7721 case IPW_TX_RATE_54MB:
7722 ipw_rt->rt_rate = 108;
7723 break;
7724 default:
7725 ipw_rt->rt_rate = 0;
7726 break;
7729 /* antenna number */
7730 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7732 /* set the preamble flag if we have it */
7733 if ((antennaAndPhy & 64))
7734 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7736 /* Set the size of the skb to the size of the frame */
7737 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7739 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7741 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7742 priv->ieee->stats.rx_errors++;
7743 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7744 rxb->skb = NULL;
7745 /* no LED during capture */
7748 #endif
7750 #ifdef CONFIG_IPW2200_PROMISCUOUS
7751 #define ieee80211_is_probe_response(fc) \
7752 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7753 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7755 #define ieee80211_is_management(fc) \
7756 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7758 #define ieee80211_is_control(fc) \
7759 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7761 #define ieee80211_is_data(fc) \
7762 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7764 #define ieee80211_is_assoc_request(fc) \
7765 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7767 #define ieee80211_is_reassoc_request(fc) \
7768 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
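/* Deliver a copy of every received frame to the promiscuous rtap
 * interface: honour the prom_priv filter (drop or header-only per frame
 * type), prepend an ipw_rt radiotap header carrying channel, rate,
 * signal/noise and antenna, and hand the copy to the rtap device's
 * ieee80211 stack. */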
7770 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7771 struct ipw_rx_mem_buffer *rxb,
7772 struct ieee80211_rx_stats *stats)
7774 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7775 struct ipw_rx_frame *frame = &pkt->u.frame;
7776 struct ipw_rt_hdr *ipw_rt;
7778 /* First cache any information we need before we overwrite
7779 * the information provided in the skb from the hardware */
7780 struct ieee80211_hdr *hdr;
7781 u16 channel = frame->received_channel;
7782 u8 phy_flags = frame->antennaAndPhy;
7783 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7784 s8 noise = frame->noise;
7785 u8 rate = frame->rate;
7786 short len = le16_to_cpu(pkt->u.frame.length);
7787 struct sk_buff *skb;
7788 int hdr_only = 0;
7789 u16 filter = priv->prom_priv->filter;
7791 /* If the filter is set to not include Rx frames then return */
7792 if (filter & IPW_PROM_NO_RX)
7793 return;
7795 /* We received data from the HW, so stop the watchdog */
7796 priv->prom_net_dev->trans_start = jiffies;
7798 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7799 priv->prom_priv->ieee->stats.rx_errors++;
7800 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7801 return;
7804 /* We only process data packets if the interface is open */
7805 if (unlikely(!netif_running(priv->prom_net_dev))) {
7806 priv->prom_priv->ieee->stats.rx_dropped++;
7807 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7808 return;
7811 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7812 * that now */
7813 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7814 /* FIXME: Should alloc bigger skb instead */
7815 priv->prom_priv->ieee->stats.rx_dropped++;
7816 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7817 return;
7820 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7821 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7822 if (filter & IPW_PROM_NO_MGMT)
7823 return;
7824 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7825 hdr_only = 1;
7826 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7827 if (filter & IPW_PROM_NO_CTL)
7828 return;
7829 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7830 hdr_only = 1;
7831 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7832 if (filter & IPW_PROM_NO_DATA)
7833 return;
7834 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7835 hdr_only = 1;
7838 /* Copy the SKB since this is for the promiscuous side */
7839 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7840 if (skb == NULL) {
7841 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
7842 return;
7845 /* copy the frame data to write after where the radiotap header goes */
7846 ipw_rt = (void *)skb->data;
7848 if (hdr_only)
7849 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7851 memcpy(ipw_rt->payload, hdr, len);
7853 /* Zero the radiotap static buffer ... We only need to zero the bytes
7854 * NOT part of our real header, saves a little time.
7856 * No longer necessary since we fill in all our data. Purge before
7857 * merging patch officially.
7858 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7859 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7862 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7863 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7864 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7866 /* Set the size of the skb to the size of the frame */
7867 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7869 /* Big bitfield of all the fields we provide in radiotap */
7870 ipw_rt->rt_hdr.it_present =
7871 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7872 (1 << IEEE80211_RADIOTAP_RATE) |
7873 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7874 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7875 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7876 (1 << IEEE80211_RADIOTAP_ANTENNA));
7878 /* Zero the flags, we'll add to them as we go */
7879 ipw_rt->rt_flags = 0;
7880 ipw_rt->rt_tsf = 0ULL;
7882 /* Convert to DBM */
7883 ipw_rt->rt_dbmsignal = signal;
7884 ipw_rt->rt_dbmnoise = noise;
7886 /* Convert the channel data and set the flags */
7887 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7888 if (channel > 14) { /* 802.11a */
7889 ipw_rt->rt_chbitmask =
7890 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7891 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7892 ipw_rt->rt_chbitmask =
7893 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7894 } else { /* 802.11g */
7895 ipw_rt->rt_chbitmask =
7896 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
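/* As used above: bit 5 of the firmware's antennaAndPhy byte appears to
 * distinguish CCK (802.11b) from OFDM reception, and bits 0-1 carry the
 * antenna number (see rt_antenna below).  This mapping is inferred from the
 * driver's own usage rather than from documented firmware behaviour. */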
7899 /* set the rate in multiples of 500k/s */
7900 switch (rate) {
7901 case IPW_TX_RATE_1MB:
7902 ipw_rt->rt_rate = 2;
7903 break;
7904 case IPW_TX_RATE_2MB:
7905 ipw_rt->rt_rate = 4;
7906 break;
7907 case IPW_TX_RATE_5MB:
7908 ipw_rt->rt_rate = 10;
7909 break;
7910 case IPW_TX_RATE_6MB:
7911 ipw_rt->rt_rate = 12;
7912 break;
7913 case IPW_TX_RATE_9MB:
7914 ipw_rt->rt_rate = 18;
7915 break;
7916 case IPW_TX_RATE_11MB:
7917 ipw_rt->rt_rate = 22;
7918 break;
7919 case IPW_TX_RATE_12MB:
7920 ipw_rt->rt_rate = 24;
7921 break;
7922 case IPW_TX_RATE_18MB:
7923 ipw_rt->rt_rate = 36;
7924 break;
7925 case IPW_TX_RATE_24MB:
7926 ipw_rt->rt_rate = 48;
7927 break;
7928 case IPW_TX_RATE_36MB:
7929 ipw_rt->rt_rate = 72;
7930 break;
7931 case IPW_TX_RATE_48MB:
7932 ipw_rt->rt_rate = 96;
7933 break;
7934 case IPW_TX_RATE_54MB:
7935 ipw_rt->rt_rate = 108;
7936 break;
7937 default:
7938 ipw_rt->rt_rate = 0;
7939 break;
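/* Radiotap expresses the rate in 500 kb/s units, so rt_rate is simply
 * Mb/s * 2 (e.g. 54 Mb/s -> 108).  Note that the 5.5 Mb/s CCK rate is
 * reported as 10 (i.e. 5 Mb/s) by the table above. */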
7942 /* antenna number */
7943 ipw_rt->rt_antenna = (phy_flags & 3);
7945 /* set the preamble flag if we have it */
7946 if (phy_flags & (1 << 6))
7947 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7949 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
7951 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
7952 priv->prom_priv->ieee->stats.rx_errors++;
7953 dev_kfree_skb_any(skb);
7956 #endif
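/* Background for the address checks below: with ToDS/FromDS clear (IBSS) the
 * 802.11 header carries addr1=destination, addr2=source, addr3=BSSID, while
 * frames from an AP (FromDS set) carry addr1=destination, addr2=BSSID,
 * addr3=source -- which is what the per-mode comments in is_network_packet()
 * refer to. */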
7958 static int is_network_packet(struct ipw_priv *priv,
7959 struct ieee80211_hdr_4addr *header)
7961 /* Filter incoming packets to determine if they are targeted toward
7962 * this network, discarding packets coming from ourselves */
7963 switch (priv->ieee->iw_mode) {
7964 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7965 /* packets from our adapter are dropped (echo) */
7966 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7967 return 0;
7969 /* {broad,multi}cast packets to our BSSID go through */
7970 if (is_multicast_ether_addr(header->addr1))
7971 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7973 /* packets to our adapter go through */
7974 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7975 ETH_ALEN);
7977 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7978 /* packets from our adapter are dropped (echo) */
7979 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7980 return 0;
7982 /* {broad,multi}cast packets to our BSS go through */
7983 if (is_multicast_ether_addr(header->addr1))
7984 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7986 /* packets to our adapter go through */
7987 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7988 ETH_ALEN);
7991 return 1;
7994 #define IPW_PACKET_RETRY_TIME HZ
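/* Duplicate/retry filtering: remember the last sequence and fragment number
 * seen (per transmitter in IBSS mode, via a small MAC-keyed hash; globally for
 * the single AP in infrastructure mode) and drop a frame that repeats them
 * within IPW_PACKET_RETRY_TIME (one second). */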
7996 static int is_duplicate_packet(struct ipw_priv *priv,
7997 struct ieee80211_hdr_4addr *header)
7999 u16 sc = le16_to_cpu(header->seq_ctl);
8000 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8001 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8002 u16 *last_seq, *last_frag;
8003 unsigned long *last_time;
8005 switch (priv->ieee->iw_mode) {
8006 case IW_MODE_ADHOC:
8008 struct list_head *p;
8009 struct ipw_ibss_seq *entry = NULL;
8010 u8 *mac = header->addr2;
8011 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8013 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8014 entry =
8015 list_entry(p, struct ipw_ibss_seq, list);
8016 if (!memcmp(entry->mac, mac, ETH_ALEN))
8017 break;
8019 if (p == &priv->ibss_mac_hash[index]) {
8020 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8021 if (!entry) {
8022 IPW_ERROR
8023 ("Cannot malloc new mac entry\n");
8024 return 0;
8026 memcpy(entry->mac, mac, ETH_ALEN);
8027 entry->seq_num = seq;
8028 entry->frag_num = frag;
8029 entry->packet_time = jiffies;
8030 list_add(&entry->list,
8031 &priv->ibss_mac_hash[index]);
8032 return 0;
8034 last_seq = &entry->seq_num;
8035 last_frag = &entry->frag_num;
8036 last_time = &entry->packet_time;
8037 break;
8039 case IW_MODE_INFRA:
8040 last_seq = &priv->last_seq_num;
8041 last_frag = &priv->last_frag_num;
8042 last_time = &priv->last_packet_time;
8043 break;
8044 default:
8045 return 0;
8047 if ((*last_seq == seq) &&
8048 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8049 if (*last_frag == frag)
8050 goto drop;
8051 if (*last_frag + 1 != frag)
8052 /* out-of-order fragment */
8053 goto drop;
8054 } else
8055 *last_seq = seq;
8057 *last_frag = frag;
8058 *last_time = jiffies;
8059 return 0;
8061 drop:
8062 /* This BUG_ON is commented out because the card has been observed to
8063 * receive duplicate packets without the FCTL_RETRY bit set when running
8064 * IBSS mode with fragmentation enabled.
8065 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8066 return 1;
8069 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8070 struct ipw_rx_mem_buffer *rxb,
8071 struct ieee80211_rx_stats *stats)
8073 struct sk_buff *skb = rxb->skb;
8074 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8075 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8076 (skb->data + IPW_RX_FRAME_SIZE);
8078 ieee80211_rx_mgt(priv->ieee, header, stats);
8080 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8081 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8082 IEEE80211_STYPE_PROBE_RESP) ||
8083 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8084 IEEE80211_STYPE_BEACON))) {
8085 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8086 ipw_add_station(priv, header->addr2);
8089 if (priv->config & CFG_NET_STATS) {
8090 IPW_DEBUG_HC("sending stat packet\n");
8092 /* Set the size of the skb to the size of the full
8093 * ipw header and 802.11 frame */
8094 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8095 IPW_RX_FRAME_SIZE);
8097 /* Advance past the ipw packet header to the 802.11 frame */
8098 skb_pull(skb, IPW_RX_FRAME_SIZE);
8100 /* Push the ieee80211_rx_stats before the 802.11 frame */
8101 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8103 skb->dev = priv->ieee->dev;
8105 /* Point raw at the ieee80211_stats */
8106 skb->mac.raw = skb->data;
8108 skb->pkt_type = PACKET_OTHERHOST;
8109 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8110 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8111 netif_rx(skb);
8112 rxb->skb = NULL;
8117 * Main entry function for receiving a packet with 802.11 headers.  This
8118 * should be called whenever the FW has notified us that there is a new
8119 * skb in the receive queue.
8121 static void ipw_rx(struct ipw_priv *priv)
8123 struct ipw_rx_mem_buffer *rxb;
8124 struct ipw_rx_packet *pkt;
8125 struct ieee80211_hdr_4addr *header;
8126 u32 r, w, i;
8127 u8 network_packet;
8129 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8130 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8131 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
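/* The RX queue is a circular buffer: 'r' is the index the firmware has filled
 * up to and 'i' is the next entry the driver should process; we walk i forward
 * until it catches up with r and restock the queue afterwards ('w', the write
 * index, is read here but not otherwise used in this loop). */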
8133 while (i != r) {
8134 rxb = priv->rxq->queue[i];
8135 if (unlikely(rxb == NULL)) {
8136 printk(KERN_CRIT "Queue not allocated!\n");
8137 break;
8139 priv->rxq->queue[i] = NULL;
8141 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8142 IPW_RX_BUF_SIZE,
8143 PCI_DMA_FROMDEVICE);
8145 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8146 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8147 pkt->header.message_type,
8148 pkt->header.rx_seq_num, pkt->header.control_bits);
8150 switch (pkt->header.message_type) {
8151 case RX_FRAME_TYPE: /* 802.11 frame */ {
8152 struct ieee80211_rx_stats stats = {
8153 .rssi = pkt->u.frame.rssi_dbm -
8154 IPW_RSSI_TO_DBM,
8155 .signal =
8156 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8157 IPW_RSSI_TO_DBM + 0x100,
8158 .noise =
8159 le16_to_cpu(pkt->u.frame.noise),
8160 .rate = pkt->u.frame.rate,
8161 .mac_time = jiffies,
8162 .received_channel =
8163 pkt->u.frame.received_channel,
8164 .freq =
8165 (pkt->u.frame.
8166 control & (1 << 0)) ?
8167 IEEE80211_24GHZ_BAND :
8168 IEEE80211_52GHZ_BAND,
8169 .len = le16_to_cpu(pkt->u.frame.length),
8172 if (stats.rssi != 0)
8173 stats.mask |= IEEE80211_STATMASK_RSSI;
8174 if (stats.signal != 0)
8175 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8176 if (stats.noise != 0)
8177 stats.mask |= IEEE80211_STATMASK_NOISE;
8178 if (stats.rate != 0)
8179 stats.mask |= IEEE80211_STATMASK_RATE;
8181 priv->rx_packets++;
8183 #ifdef CONFIG_IPW2200_PROMISCUOUS
8184 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8185 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8186 #endif
8188 #ifdef CONFIG_IPW2200_MONITOR
8189 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8190 #ifdef CONFIG_IPW2200_RADIOTAP
8192 ipw_handle_data_packet_monitor(priv,
8193 rxb,
8194 &stats);
8195 #else
8196 ipw_handle_data_packet(priv, rxb,
8197 &stats);
8198 #endif
8199 break;
8201 #endif
8203 header =
8204 (struct ieee80211_hdr_4addr *)(rxb->skb->
8205 data +
8206 IPW_RX_FRAME_SIZE);
8207 /* TODO: Check Ad-Hoc dest/source and make sure
8208 * that we are actually parsing these packets
8209 * correctly -- we should probably use the
8210 * frame control of the packet and disregard
8211 * the current iw_mode */
8213 network_packet =
8214 is_network_packet(priv, header);
8215 if (network_packet && priv->assoc_network) {
8216 priv->assoc_network->stats.rssi =
8217 stats.rssi;
8218 priv->exp_avg_rssi =
8219 exponential_average(priv->exp_avg_rssi,
8220 stats.rssi, DEPTH_RSSI);
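/* exp_avg_rssi is a running exponential average of the per-packet RSSI;
 * exponential_average() folds the new sample in with a weight controlled by
 * DEPTH_RSSI, smoothing out per-frame noise without keeping a sample
 * history. */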
8223 IPW_DEBUG_RX("Frame: len=%u\n",
8224 le16_to_cpu(pkt->u.frame.length));
8226 if (le16_to_cpu(pkt->u.frame.length) <
8227 ieee80211_get_hdrlen(le16_to_cpu(
8228 header->frame_ctl))) {
8229 IPW_DEBUG_DROP
8230 ("Received packet is too small. "
8231 "Dropping.\n");
8232 priv->ieee->stats.rx_errors++;
8233 priv->wstats.discard.misc++;
8234 break;
8237 switch (WLAN_FC_GET_TYPE
8238 (le16_to_cpu(header->frame_ctl))) {
8240 case IEEE80211_FTYPE_MGMT:
8241 ipw_handle_mgmt_packet(priv, rxb,
8242 &stats);
8243 break;
8245 case IEEE80211_FTYPE_CTL:
8246 break;
8248 case IEEE80211_FTYPE_DATA:
8249 if (unlikely(!network_packet ||
8250 is_duplicate_packet(priv,
8251 header)))
8253 IPW_DEBUG_DROP("Dropping: "
8254 MAC_FMT ", "
8255 MAC_FMT ", "
8256 MAC_FMT "\n",
8257 MAC_ARG(header->
8258 addr1),
8259 MAC_ARG(header->
8260 addr2),
8261 MAC_ARG(header->
8262 addr3));
8263 break;
8266 ipw_handle_data_packet(priv, rxb,
8267 &stats);
8269 break;
8271 break;
8274 case RX_HOST_NOTIFICATION_TYPE:{
8275 IPW_DEBUG_RX
8276 ("Notification: subtype=%02X flags=%02X size=%d\n",
8277 pkt->u.notification.subtype,
8278 pkt->u.notification.flags,
8279 pkt->u.notification.size);
8280 ipw_rx_notification(priv, &pkt->u.notification);
8281 break;
8284 default:
8285 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8286 pkt->header.message_type);
8287 break;
8290 /* For now we just don't re-use anything. We can tweak this
8291 * later to try and re-use notification packets and SKBs that
8292 * fail to Rx correctly */
8293 if (rxb->skb != NULL) {
8294 dev_kfree_skb_any(rxb->skb);
8295 rxb->skb = NULL;
8298 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8299 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8300 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8302 i = (i + 1) % RX_QUEUE_SIZE;
8305 /* Backtrack one entry */
8306 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8308 ipw_rx_queue_restock(priv);
8311 #define DEFAULT_RTS_THRESHOLD 2304U
8312 #define MIN_RTS_THRESHOLD 1U
8313 #define MAX_RTS_THRESHOLD 2304U
8314 #define DEFAULT_BEACON_INTERVAL 100U
8315 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8316 #define DEFAULT_LONG_RETRY_LIMIT 4U
8319 * ipw_sw_reset
8320 * @option: options to control different reset behaviour
8321 * 0 = reset everything except the 'disable' module_param
8322 * 1 = reset everything and print out driver info (for probe only)
8323 * 2 = reset everything
8325 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8327 int band, modulation;
8328 int old_mode = priv->ieee->iw_mode;
8330 /* Initialize module parameter values here */
8331 priv->config = 0;
8333 /* We default to disabling the LED code as right now it causes
8334 * too many systems to lock up... */
8335 if (!led)
8336 priv->config |= CFG_NO_LED;
8338 if (associate)
8339 priv->config |= CFG_ASSOCIATE;
8340 else
8341 IPW_DEBUG_INFO("Auto associate disabled.\n");
8343 if (auto_create)
8344 priv->config |= CFG_ADHOC_CREATE;
8345 else
8346 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8348 priv->config &= ~CFG_STATIC_ESSID;
8349 priv->essid_len = 0;
8350 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8352 if (disable && option) {
8353 priv->status |= STATUS_RF_KILL_SW;
8354 IPW_DEBUG_INFO("Radio disabled.\n");
8357 if (channel != 0) {
8358 priv->config |= CFG_STATIC_CHANNEL;
8359 priv->channel = channel;
8360 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8361 /* TODO: Validate that provided channel is in range */
8363 #ifdef CONFIG_IPW2200_QOS
8364 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8365 burst_duration_CCK, burst_duration_OFDM);
8366 #endif /* CONFIG_IPW2200_QOS */
8368 switch (mode) {
8369 case 1:
8370 priv->ieee->iw_mode = IW_MODE_ADHOC;
8371 priv->net_dev->type = ARPHRD_ETHER;
8373 break;
8374 #ifdef CONFIG_IPW2200_MONITOR
8375 case 2:
8376 priv->ieee->iw_mode = IW_MODE_MONITOR;
8377 #ifdef CONFIG_IPW2200_RADIOTAP
8378 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8379 #else
8380 priv->net_dev->type = ARPHRD_IEEE80211;
8381 #endif
8382 break;
8383 #endif
8384 default:
8385 case 0:
8386 priv->net_dev->type = ARPHRD_ETHER;
8387 priv->ieee->iw_mode = IW_MODE_INFRA;
8388 break;
8391 if (hwcrypto) {
8392 priv->ieee->host_encrypt = 0;
8393 priv->ieee->host_encrypt_msdu = 0;
8394 priv->ieee->host_decrypt = 0;
8395 priv->ieee->host_mc_decrypt = 0;
8397 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8399 /* IPW2200/2915 is able to do hardware fragmentation. */
8400 priv->ieee->host_open_frag = 0;
8402 if ((priv->pci_dev->device == 0x4223) ||
8403 (priv->pci_dev->device == 0x4224)) {
8404 if (option == 1)
8405 printk(KERN_INFO DRV_NAME
8406 ": Detected Intel PRO/Wireless 2915ABG Network "
8407 "Connection\n");
8408 priv->ieee->abg_true = 1;
8409 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8410 modulation = IEEE80211_OFDM_MODULATION |
8411 IEEE80211_CCK_MODULATION;
8412 priv->adapter = IPW_2915ABG;
8413 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8414 } else {
8415 if (option == 1)
8416 printk(KERN_INFO DRV_NAME
8417 ": Detected Intel PRO/Wireless 2200BG Network "
8418 "Connection\n");
8420 priv->ieee->abg_true = 0;
8421 band = IEEE80211_24GHZ_BAND;
8422 modulation = IEEE80211_OFDM_MODULATION |
8423 IEEE80211_CCK_MODULATION;
8424 priv->adapter = IPW_2200BG;
8425 priv->ieee->mode = IEEE_G | IEEE_B;
8428 priv->ieee->freq_band = band;
8429 priv->ieee->modulation = modulation;
8431 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8433 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8434 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8436 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8437 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8438 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8440 /* If power management is turned on, default to AC mode */
8441 priv->power_mode = IPW_POWER_AC;
8442 priv->tx_power = IPW_TX_POWER_DEFAULT;
8444 return old_mode == priv->ieee->iw_mode;
8448 * This file defines the Wireless Extension handlers. It does not
8449 * define any methods of hardware manipulation and relies on the
8450 * functions defined in ipw_main to provide the HW interaction.
8452 * The exception to this is the use of the ipw_get_ordinal()
8453 * function, used to poll the hardware rather than making unnecessary calls.
8457 static int ipw_wx_get_name(struct net_device *dev,
8458 struct iw_request_info *info,
8459 union iwreq_data *wrqu, char *extra)
8461 struct ipw_priv *priv = ieee80211_priv(dev);
8462 mutex_lock(&priv->mutex);
8463 if (priv->status & STATUS_RF_KILL_MASK)
8464 strcpy(wrqu->name, "radio off");
8465 else if (!(priv->status & STATUS_ASSOCIATED))
8466 strcpy(wrqu->name, "unassociated");
8467 else
8468 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8469 ipw_modes[priv->assoc_request.ieee_mode]);
8470 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8471 mutex_unlock(&priv->mutex);
8472 return 0;
8475 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8477 if (channel == 0) {
8478 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8479 priv->config &= ~CFG_STATIC_CHANNEL;
8480 IPW_DEBUG_ASSOC("Attempting to associate with new "
8481 "parameters.\n");
8482 ipw_associate(priv);
8483 return 0;
8486 priv->config |= CFG_STATIC_CHANNEL;
8488 if (priv->channel == channel) {
8489 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8490 channel);
8491 return 0;
8494 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8495 priv->channel = channel;
8497 #ifdef CONFIG_IPW2200_MONITOR
8498 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8499 int i;
8500 if (priv->status & STATUS_SCANNING) {
8501 IPW_DEBUG_SCAN("Scan abort triggered due to "
8502 "channel change.\n");
8503 ipw_abort_scan(priv);
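/* Busy-wait for the abort to take effect: up to 1000 iterations of udelay(10),
 * i.e. roughly 10 ms at most, before giving up and reporting that the scan is
 * still running. */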
8506 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8507 udelay(10);
8509 if (priv->status & STATUS_SCANNING)
8510 IPW_DEBUG_SCAN("Still scanning...\n");
8511 else
8512 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8513 1000 - i);
8515 return 0;
8517 #endif /* CONFIG_IPW2200_MONITOR */
8519 /* Network configuration changed -- force [re]association */
8520 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8521 if (!ipw_disassociate(priv))
8522 ipw_associate(priv);
8524 return 0;
8527 static int ipw_wx_set_freq(struct net_device *dev,
8528 struct iw_request_info *info,
8529 union iwreq_data *wrqu, char *extra)
8531 struct ipw_priv *priv = ieee80211_priv(dev);
8532 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8533 struct iw_freq *fwrq = &wrqu->freq;
8534 int ret = 0, i;
8535 u8 channel, flags;
8536 int band;
8538 if (fwrq->m == 0) {
8539 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8540 mutex_lock(&priv->mutex);
8541 ret = ipw_set_channel(priv, 0);
8542 mutex_unlock(&priv->mutex);
8543 return ret;
8545 /* if setting by freq convert to channel */
8546 if (fwrq->e == 1) {
8547 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8548 if (channel == 0)
8549 return -EINVAL;
8550 } else
8551 channel = fwrq->m;
8553 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8554 return -EINVAL;
8556 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8557 i = ieee80211_channel_to_index(priv->ieee, channel);
8558 if (i == -1)
8559 return -EINVAL;
8561 flags = (band == IEEE80211_24GHZ_BAND) ?
8562 geo->bg[i].flags : geo->a[i].flags;
8563 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8564 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8565 return -EINVAL;
8569 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8570 mutex_lock(&priv->mutex);
8571 ret = ipw_set_channel(priv, channel);
8572 mutex_unlock(&priv->mutex);
8573 return ret;
8576 static int ipw_wx_get_freq(struct net_device *dev,
8577 struct iw_request_info *info,
8578 union iwreq_data *wrqu, char *extra)
8580 struct ipw_priv *priv = ieee80211_priv(dev);
8582 wrqu->freq.e = 0;
8584 /* If we are associated, trying to associate, or have a statically
8585 * configured CHANNEL then return that; otherwise return ANY */
8586 mutex_lock(&priv->mutex);
8587 if (priv->config & CFG_STATIC_CHANNEL ||
8588 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8589 int i;
8591 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8592 BUG_ON(i == -1);
8593 wrqu->freq.e = 1;
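/* Wireless Extensions encode a frequency as m * 10^e Hz; the geo tables store
 * channel center frequencies in MHz, so m = freq * 100000 together with e = 1
 * yields the value in Hz. */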
8595 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8596 case IEEE80211_52GHZ_BAND:
8597 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8598 break;
8600 case IEEE80211_24GHZ_BAND:
8601 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8602 break;
8604 default:
8605 BUG();
8607 } else
8608 wrqu->freq.m = 0;
8610 mutex_unlock(&priv->mutex);
8611 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8612 return 0;
8615 static int ipw_wx_set_mode(struct net_device *dev,
8616 struct iw_request_info *info,
8617 union iwreq_data *wrqu, char *extra)
8619 struct ipw_priv *priv = ieee80211_priv(dev);
8620 int err = 0;
8622 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8624 switch (wrqu->mode) {
8625 #ifdef CONFIG_IPW2200_MONITOR
8626 case IW_MODE_MONITOR:
8627 #endif
8628 case IW_MODE_ADHOC:
8629 case IW_MODE_INFRA:
8630 break;
8631 case IW_MODE_AUTO:
8632 wrqu->mode = IW_MODE_INFRA;
8633 break;
8634 default:
8635 return -EINVAL;
8637 if (wrqu->mode == priv->ieee->iw_mode)
8638 return 0;
8640 mutex_lock(&priv->mutex);
8642 ipw_sw_reset(priv, 0);
8644 #ifdef CONFIG_IPW2200_MONITOR
8645 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8646 priv->net_dev->type = ARPHRD_ETHER;
8648 if (wrqu->mode == IW_MODE_MONITOR)
8649 #ifdef CONFIG_IPW2200_RADIOTAP
8650 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8651 #else
8652 priv->net_dev->type = ARPHRD_IEEE80211;
8653 #endif
8654 #endif /* CONFIG_IPW2200_MONITOR */
8656 /* Free the existing firmware and reset the fw_loaded
8657 * flag so ipw_load() will bring in the new firmware */
8658 free_firmware();
8660 priv->ieee->iw_mode = wrqu->mode;
8662 queue_work(priv->workqueue, &priv->adapter_restart);
8663 mutex_unlock(&priv->mutex);
8664 return err;
8667 static int ipw_wx_get_mode(struct net_device *dev,
8668 struct iw_request_info *info,
8669 union iwreq_data *wrqu, char *extra)
8671 struct ipw_priv *priv = ieee80211_priv(dev);
8672 mutex_lock(&priv->mutex);
8673 wrqu->mode = priv->ieee->iw_mode;
8674 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8675 mutex_unlock(&priv->mutex);
8676 return 0;
8679 /* Values are in microseconds */
8680 static const s32 timeout_duration[] = {
8681 350000,
8682 250000,
8683 75000,
8684 37000,
8685 25000,
8688 static const s32 period_duration[] = {
8689 400000,
8690 700000,
8691 1000000,
8692 1000000,
8693 1000000
8696 static int ipw_wx_get_range(struct net_device *dev,
8697 struct iw_request_info *info,
8698 union iwreq_data *wrqu, char *extra)
8700 struct ipw_priv *priv = ieee80211_priv(dev);
8701 struct iw_range *range = (struct iw_range *)extra;
8702 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8703 int i = 0, j;
8705 wrqu->data.length = sizeof(*range);
8706 memset(range, 0, sizeof(*range));
8708 /* 54 Mb/s == ~27 Mb/s of real throughput (802.11g) */
8709 range->throughput = 27 * 1000 * 1000;
8711 range->max_qual.qual = 100;
8712 /* TODO: Find real max RSSI and stick here */
8713 range->max_qual.level = 0;
8714 range->max_qual.noise = 0;
8715 range->max_qual.updated = 7; /* Updated all three */
8717 range->avg_qual.qual = 70;
8718 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8719 range->avg_qual.level = 0; /* FIXME to real average level */
8720 range->avg_qual.noise = 0;
8721 range->avg_qual.updated = 7; /* Updated all three */
8722 mutex_lock(&priv->mutex);
8723 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8725 for (i = 0; i < range->num_bitrates; i++)
8726 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8727 500000;
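/* Supported rates are kept in 802.11 information-element form: the low seven
 * bits give the rate in 500 kb/s units (hence the & 0x7F and the multiply by
 * 500000 to get bits/s) and the top bit flags a basic rate. */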
8729 range->max_rts = DEFAULT_RTS_THRESHOLD;
8730 range->min_frag = MIN_FRAG_THRESHOLD;
8731 range->max_frag = MAX_FRAG_THRESHOLD;
8733 range->encoding_size[0] = 5;
8734 range->encoding_size[1] = 13;
8735 range->num_encoding_sizes = 2;
8736 range->max_encoding_tokens = WEP_KEYS;
8738 /* Set the Wireless Extension versions */
8739 range->we_version_compiled = WIRELESS_EXT;
8740 range->we_version_source = 18;
8742 i = 0;
8743 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8744 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8745 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8746 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8747 continue;
8749 range->freq[i].i = geo->bg[j].channel;
8750 range->freq[i].m = geo->bg[j].freq * 100000;
8751 range->freq[i].e = 1;
8752 i++;
8756 if (priv->ieee->mode & IEEE_A) {
8757 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8758 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8759 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8760 continue;
8762 range->freq[i].i = geo->a[j].channel;
8763 range->freq[i].m = geo->a[j].freq * 100000;
8764 range->freq[i].e = 1;
8765 i++;
8769 range->num_channels = i;
8770 range->num_frequency = i;
8772 mutex_unlock(&priv->mutex);
8774 /* Event capability (kernel + driver) */
8775 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8776 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8777 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8778 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8779 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8781 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8782 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8784 IPW_DEBUG_WX("GET Range\n");
8785 return 0;
8788 static int ipw_wx_set_wap(struct net_device *dev,
8789 struct iw_request_info *info,
8790 union iwreq_data *wrqu, char *extra)
8792 struct ipw_priv *priv = ieee80211_priv(dev);
8794 static const unsigned char any[] = {
8795 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8797 static const unsigned char off[] = {
8798 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8801 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8802 return -EINVAL;
8803 mutex_lock(&priv->mutex);
8804 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8805 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8806 /* we disable mandatory BSSID association */
8807 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8808 priv->config &= ~CFG_STATIC_BSSID;
8809 IPW_DEBUG_ASSOC("Attempting to associate with new "
8810 "parameters.\n");
8811 ipw_associate(priv);
8812 mutex_unlock(&priv->mutex);
8813 return 0;
8816 priv->config |= CFG_STATIC_BSSID;
8817 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8818 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8819 mutex_unlock(&priv->mutex);
8820 return 0;
8823 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8824 MAC_ARG(wrqu->ap_addr.sa_data));
8826 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8828 /* Network configuration changed -- force [re]association */
8829 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8830 if (!ipw_disassociate(priv))
8831 ipw_associate(priv);
8833 mutex_unlock(&priv->mutex);
8834 return 0;
8837 static int ipw_wx_get_wap(struct net_device *dev,
8838 struct iw_request_info *info,
8839 union iwreq_data *wrqu, char *extra)
8841 struct ipw_priv *priv = ieee80211_priv(dev);
8842 /* If we are associated, trying to associate, or have a statically
8843 * configured BSSID then return that; otherwise return ANY */
8844 mutex_lock(&priv->mutex);
8845 if (priv->config & CFG_STATIC_BSSID ||
8846 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8847 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8848 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8849 } else
8850 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8852 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8853 MAC_ARG(wrqu->ap_addr.sa_data));
8854 mutex_unlock(&priv->mutex);
8855 return 0;
8858 static int ipw_wx_set_essid(struct net_device *dev,
8859 struct iw_request_info *info,
8860 union iwreq_data *wrqu, char *extra)
8862 struct ipw_priv *priv = ieee80211_priv(dev);
8863 int length;
8865 mutex_lock(&priv->mutex);
8867 if (!wrqu->essid.flags)
8869 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8870 ipw_disassociate(priv);
8871 priv->config &= ~CFG_STATIC_ESSID;
8872 ipw_associate(priv);
8873 mutex_unlock(&priv->mutex);
8874 return 0;
8877 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8879 priv->config |= CFG_STATIC_ESSID;
8881 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8882 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8883 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8884 mutex_unlock(&priv->mutex);
8885 return 0;
8888 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8889 length);
8891 priv->essid_len = length;
8892 memcpy(priv->essid, extra, priv->essid_len);
8894 /* Network configuration changed -- force [re]association */
8895 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8896 if (!ipw_disassociate(priv))
8897 ipw_associate(priv);
8899 mutex_unlock(&priv->mutex);
8900 return 0;
8903 static int ipw_wx_get_essid(struct net_device *dev,
8904 struct iw_request_info *info,
8905 union iwreq_data *wrqu, char *extra)
8907 struct ipw_priv *priv = ieee80211_priv(dev);
8909 /* If we are associated, trying to associate, or have a statically
8910 * configured ESSID then return that; otherwise return ANY */
8911 mutex_lock(&priv->mutex);
8912 if (priv->config & CFG_STATIC_ESSID ||
8913 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8914 IPW_DEBUG_WX("Getting essid: '%s'\n",
8915 escape_essid(priv->essid, priv->essid_len));
8916 memcpy(extra, priv->essid, priv->essid_len);
8917 wrqu->essid.length = priv->essid_len;
8918 wrqu->essid.flags = 1; /* active */
8919 } else {
8920 IPW_DEBUG_WX("Getting essid: ANY\n");
8921 wrqu->essid.length = 0;
8922 wrqu->essid.flags = 0; /* active */
8924 mutex_unlock(&priv->mutex);
8925 return 0;
8928 static int ipw_wx_set_nick(struct net_device *dev,
8929 struct iw_request_info *info,
8930 union iwreq_data *wrqu, char *extra)
8932 struct ipw_priv *priv = ieee80211_priv(dev);
8934 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8935 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8936 return -E2BIG;
8937 mutex_lock(&priv->mutex);
8938 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8939 memset(priv->nick, 0, sizeof(priv->nick));
8940 memcpy(priv->nick, extra, wrqu->data.length);
8941 IPW_DEBUG_TRACE("<<\n");
8942 mutex_unlock(&priv->mutex);
8943 return 0;
8947 static int ipw_wx_get_nick(struct net_device *dev,
8948 struct iw_request_info *info,
8949 union iwreq_data *wrqu, char *extra)
8951 struct ipw_priv *priv = ieee80211_priv(dev);
8952 IPW_DEBUG_WX("Getting nick\n");
8953 mutex_lock(&priv->mutex);
8954 wrqu->data.length = strlen(priv->nick);
8955 memcpy(extra, priv->nick, wrqu->data.length);
8956 wrqu->data.flags = 1; /* active */
8957 mutex_unlock(&priv->mutex);
8958 return 0;
8961 static int ipw_wx_set_sens(struct net_device *dev,
8962 struct iw_request_info *info,
8963 union iwreq_data *wrqu, char *extra)
8965 struct ipw_priv *priv = ieee80211_priv(dev);
8966 int err = 0;
8968 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8969 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8970 mutex_lock(&priv->mutex);
8972 if (wrqu->sens.fixed == 0)
8974 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8975 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8976 goto out;
8978 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8979 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8980 err = -EINVAL;
8981 goto out;
8984 priv->roaming_threshold = wrqu->sens.value;
8985 priv->disassociate_threshold = 3 * wrqu->sens.value;
8986 out:
8987 mutex_unlock(&priv->mutex);
8988 return err;
8991 static int ipw_wx_get_sens(struct net_device *dev,
8992 struct iw_request_info *info,
8993 union iwreq_data *wrqu, char *extra)
8995 struct ipw_priv *priv = ieee80211_priv(dev);
8996 mutex_lock(&priv->mutex);
8997 wrqu->sens.fixed = 1;
8998 wrqu->sens.value = priv->roaming_threshold;
8999 mutex_unlock(&priv->mutex);
9001 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9002 wrqu->sens.disabled ? "OFF" : "ON", wrqu->sens.value);
9004 return 0;
9007 static int ipw_wx_set_rate(struct net_device *dev,
9008 struct iw_request_info *info,
9009 union iwreq_data *wrqu, char *extra)
9011 /* TODO: We should use semaphores or locks for access to priv */
9012 struct ipw_priv *priv = ieee80211_priv(dev);
9013 u32 target_rate = wrqu->bitrate.value;
9014 u32 fixed, mask;
9016 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9017 /* value = X, fixed = 1 means only rate X */
9018 /* value = X, fixed = 0 means all rates up to and including X */
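/* Example: a request for 11 Mb/s with fixed == 0 falls through the checks
 * below and accumulates IEEE80211_CCK_RATE_1MB/2MB/5MB/11MB_MASK plus
 * IEEE80211_OFDM_RATE_6MB/9MB_MASK before the 11000000 match jumps to
 * apply. */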
9020 if (target_rate == -1) {
9021 fixed = 0;
9022 mask = IEEE80211_DEFAULT_RATES_MASK;
9023 /* Now we should reassociate */
9024 goto apply;
9027 mask = 0;
9028 fixed = wrqu->bitrate.fixed;
9030 if (target_rate == 1000000 || !fixed)
9031 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9032 if (target_rate == 1000000)
9033 goto apply;
9035 if (target_rate == 2000000 || !fixed)
9036 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9037 if (target_rate == 2000000)
9038 goto apply;
9040 if (target_rate == 5500000 || !fixed)
9041 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9042 if (target_rate == 5500000)
9043 goto apply;
9045 if (target_rate == 6000000 || !fixed)
9046 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9047 if (target_rate == 6000000)
9048 goto apply;
9050 if (target_rate == 9000000 || !fixed)
9051 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9052 if (target_rate == 9000000)
9053 goto apply;
9055 if (target_rate == 11000000 || !fixed)
9056 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9057 if (target_rate == 11000000)
9058 goto apply;
9060 if (target_rate == 12000000 || !fixed)
9061 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9062 if (target_rate == 12000000)
9063 goto apply;
9065 if (target_rate == 18000000 || !fixed)
9066 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9067 if (target_rate == 18000000)
9068 goto apply;
9070 if (target_rate == 24000000 || !fixed)
9071 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9072 if (target_rate == 24000000)
9073 goto apply;
9075 if (target_rate == 36000000 || !fixed)
9076 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9077 if (target_rate == 36000000)
9078 goto apply;
9080 if (target_rate == 48000000 || !fixed)
9081 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9082 if (target_rate == 48000000)
9083 goto apply;
9085 if (target_rate == 54000000 || !fixed)
9086 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9087 if (target_rate == 54000000)
9088 goto apply;
9090 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9091 return -EINVAL;
9093 apply:
9094 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9095 mask, fixed ? "fixed" : "sub-rates");
9096 mutex_lock(&priv->mutex);
9097 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9098 priv->config &= ~CFG_FIXED_RATE;
9099 ipw_set_fixed_rate(priv, priv->ieee->mode);
9100 } else
9101 priv->config |= CFG_FIXED_RATE;
9103 if (priv->rates_mask == mask) {
9104 IPW_DEBUG_WX("Mask set to current mask.\n");
9105 mutex_unlock(&priv->mutex);
9106 return 0;
9109 priv->rates_mask = mask;
9111 /* Network configuration changed -- force [re]association */
9112 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9113 if (!ipw_disassociate(priv))
9114 ipw_associate(priv);
9116 mutex_unlock(&priv->mutex);
9117 return 0;
9120 static int ipw_wx_get_rate(struct net_device *dev,
9121 struct iw_request_info *info,
9122 union iwreq_data *wrqu, char *extra)
9124 struct ipw_priv *priv = ieee80211_priv(dev);
9125 mutex_lock(&priv->mutex);
9126 wrqu->bitrate.value = priv->last_rate;
9127 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9128 mutex_unlock(&priv->mutex);
9129 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9130 return 0;
9133 static int ipw_wx_set_rts(struct net_device *dev,
9134 struct iw_request_info *info,
9135 union iwreq_data *wrqu, char *extra)
9137 struct ipw_priv *priv = ieee80211_priv(dev);
9138 mutex_lock(&priv->mutex);
9139 if (wrqu->rts.disabled)
9140 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9141 else {
9142 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9143 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9144 mutex_unlock(&priv->mutex);
9145 return -EINVAL;
9147 priv->rts_threshold = wrqu->rts.value;
9150 ipw_send_rts_threshold(priv, priv->rts_threshold);
9151 mutex_unlock(&priv->mutex);
9152 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9153 return 0;
9156 static int ipw_wx_get_rts(struct net_device *dev,
9157 struct iw_request_info *info,
9158 union iwreq_data *wrqu, char *extra)
9160 struct ipw_priv *priv = ieee80211_priv(dev);
9161 mutex_lock(&priv->mutex);
9162 wrqu->rts.value = priv->rts_threshold;
9163 wrqu->rts.fixed = 0; /* no auto select */
9164 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9165 mutex_unlock(&priv->mutex);
9166 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9167 return 0;
9170 static int ipw_wx_set_txpow(struct net_device *dev,
9171 struct iw_request_info *info,
9172 union iwreq_data *wrqu, char *extra)
9174 struct ipw_priv *priv = ieee80211_priv(dev);
9175 int err = 0;
9177 mutex_lock(&priv->mutex);
9178 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9179 err = -EINPROGRESS;
9180 goto out;
9183 if (!wrqu->power.fixed)
9184 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9186 if (wrqu->power.flags != IW_TXPOW_DBM) {
9187 err = -EINVAL;
9188 goto out;
9191 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9192 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9193 err = -EINVAL;
9194 goto out;
9197 priv->tx_power = wrqu->power.value;
9198 err = ipw_set_tx_power(priv);
9199 out:
9200 mutex_unlock(&priv->mutex);
9201 return err;
9204 static int ipw_wx_get_txpow(struct net_device *dev,
9205 struct iw_request_info *info,
9206 union iwreq_data *wrqu, char *extra)
9208 struct ipw_priv *priv = ieee80211_priv(dev);
9209 mutex_lock(&priv->mutex);
9210 wrqu->power.value = priv->tx_power;
9211 wrqu->power.fixed = 1;
9212 wrqu->power.flags = IW_TXPOW_DBM;
9213 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9214 mutex_unlock(&priv->mutex);
9216 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9217 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9219 return 0;
9222 static int ipw_wx_set_frag(struct net_device *dev,
9223 struct iw_request_info *info,
9224 union iwreq_data *wrqu, char *extra)
9226 struct ipw_priv *priv = ieee80211_priv(dev);
9227 mutex_lock(&priv->mutex);
9228 if (wrqu->frag.disabled)
9229 priv->ieee->fts = DEFAULT_FTS;
9230 else {
9231 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9232 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9233 mutex_unlock(&priv->mutex);
9234 return -EINVAL;
9237 priv->ieee->fts = wrqu->frag.value & ~0x1;
9240 ipw_send_frag_threshold(priv, wrqu->frag.value);
9241 mutex_unlock(&priv->mutex);
9242 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9243 return 0;
9246 static int ipw_wx_get_frag(struct net_device *dev,
9247 struct iw_request_info *info,
9248 union iwreq_data *wrqu, char *extra)
9250 struct ipw_priv *priv = ieee80211_priv(dev);
9251 mutex_lock(&priv->mutex);
9252 wrqu->frag.value = priv->ieee->fts;
9253 wrqu->frag.fixed = 0; /* no auto select */
9254 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9255 mutex_unlock(&priv->mutex);
9256 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9258 return 0;
9261 static int ipw_wx_set_retry(struct net_device *dev,
9262 struct iw_request_info *info,
9263 union iwreq_data *wrqu, char *extra)
9265 struct ipw_priv *priv = ieee80211_priv(dev);
9267 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9268 return -EINVAL;
9270 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9271 return 0;
9273 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9274 return -EINVAL;
9276 mutex_lock(&priv->mutex);
9277 if (wrqu->retry.flags & IW_RETRY_SHORT)
9278 priv->short_retry_limit = (u8) wrqu->retry.value;
9279 else if (wrqu->retry.flags & IW_RETRY_LONG)
9280 priv->long_retry_limit = (u8) wrqu->retry.value;
9281 else {
9282 priv->short_retry_limit = (u8) wrqu->retry.value;
9283 priv->long_retry_limit = (u8) wrqu->retry.value;
9286 ipw_send_retry_limit(priv, priv->short_retry_limit,
9287 priv->long_retry_limit);
9288 mutex_unlock(&priv->mutex);
9289 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9290 priv->short_retry_limit, priv->long_retry_limit);
9291 return 0;
9294 static int ipw_wx_get_retry(struct net_device *dev,
9295 struct iw_request_info *info,
9296 union iwreq_data *wrqu, char *extra)
9298 struct ipw_priv *priv = ieee80211_priv(dev);
9300 mutex_lock(&priv->mutex);
9301 wrqu->retry.disabled = 0;
9303 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9304 mutex_unlock(&priv->mutex);
9305 return -EINVAL;
9308 if (wrqu->retry.flags & IW_RETRY_LONG) {
9309 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9310 wrqu->retry.value = priv->long_retry_limit;
9311 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9312 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9313 wrqu->retry.value = priv->short_retry_limit;
9314 } else {
9315 wrqu->retry.flags = IW_RETRY_LIMIT;
9316 wrqu->retry.value = priv->short_retry_limit;
9318 mutex_unlock(&priv->mutex);
9320 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9322 return 0;
9325 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9326 int essid_len)
9328 struct ipw_scan_request_ext scan;
9329 int err = 0, scan_type;
9331 if (!(priv->status & STATUS_INIT) ||
9332 (priv->status & STATUS_EXIT_PENDING))
9333 return 0;
9335 mutex_lock(&priv->mutex);
9337 if (priv->status & STATUS_RF_KILL_MASK) {
9338 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9339 priv->status |= STATUS_SCAN_PENDING;
9340 goto done;
9343 IPW_DEBUG_HC("starting request direct scan!\n");
9345 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9346 /* We should not sleep here; otherwise we will block most
9347 * of the system (for instance, we hold rtnl_lock when we
9348 * get here).
9350 err = -EAGAIN;
9351 goto done;
9353 memset(&scan, 0, sizeof(scan));
9355 if (priv->config & CFG_SPEED_SCAN)
9356 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9357 cpu_to_le16(30);
9358 else
9359 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9360 cpu_to_le16(20);
9362 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9363 cpu_to_le16(20);
9364 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9365 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9367 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9369 err = ipw_send_ssid(priv, essid, essid_len);
9370 if (err) {
9371 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9372 goto done;
9374 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9376 ipw_add_scan_channels(priv, &scan, scan_type);
9378 err = ipw_send_scan_request_ext(priv, &scan);
9379 if (err) {
9380 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9381 goto done;
9384 priv->status |= STATUS_SCANNING;
9386 done:
9387 mutex_unlock(&priv->mutex);
9388 return err;
9391 static int ipw_wx_set_scan(struct net_device *dev,
9392 struct iw_request_info *info,
9393 union iwreq_data *wrqu, char *extra)
9395 struct ipw_priv *priv = ieee80211_priv(dev);
9396 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9398 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9399 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9400 ipw_request_direct_scan(priv, req->essid,
9401 req->essid_len);
9402 return 0;
9404 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9405 queue_work(priv->workqueue,
9406 &priv->request_passive_scan);
9407 return 0;
9411 IPW_DEBUG_WX("Start scan\n");
9413 queue_work(priv->workqueue, &priv->request_scan);
9415 return 0;
9418 static int ipw_wx_get_scan(struct net_device *dev,
9419 struct iw_request_info *info,
9420 union iwreq_data *wrqu, char *extra)
9422 struct ipw_priv *priv = ieee80211_priv(dev);
9423 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9426 static int ipw_wx_set_encode(struct net_device *dev,
9427 struct iw_request_info *info,
9428 union iwreq_data *wrqu, char *key)
9430 struct ipw_priv *priv = ieee80211_priv(dev);
9431 int ret;
9432 u32 cap = priv->capability;
9434 mutex_lock(&priv->mutex);
9435 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9437 /* In IBSS mode, we need to notify the firmware to update
9438 * the beacon info after we changed the capability. */
9439 if (cap != priv->capability &&
9440 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9441 priv->status & STATUS_ASSOCIATED)
9442 ipw_disassociate(priv);
9444 mutex_unlock(&priv->mutex);
9445 return ret;
9448 static int ipw_wx_get_encode(struct net_device *dev,
9449 struct iw_request_info *info,
9450 union iwreq_data *wrqu, char *key)
9452 struct ipw_priv *priv = ieee80211_priv(dev);
9453 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9456 static int ipw_wx_set_power(struct net_device *dev,
9457 struct iw_request_info *info,
9458 union iwreq_data *wrqu, char *extra)
9460 struct ipw_priv *priv = ieee80211_priv(dev);
9461 int err;
9462 mutex_lock(&priv->mutex);
9463 if (wrqu->power.disabled) {
9464 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9465 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9466 if (err) {
9467 IPW_DEBUG_WX("failed setting power mode.\n");
9468 mutex_unlock(&priv->mutex);
9469 return err;
9471 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9472 mutex_unlock(&priv->mutex);
9473 return 0;
9476 switch (wrqu->power.flags & IW_POWER_MODE) {
9477 case IW_POWER_ON: /* If not specified */
9478 case IW_POWER_MODE: /* If set all mask */
9479 case IW_POWER_ALL_R: /* If explicitly set to all */
9480 break;
9481 default: /* Otherwise we don't support it */
9482 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9483 wrqu->power.flags);
9484 mutex_unlock(&priv->mutex);
9485 return -EOPNOTSUPP;
9488 /* If the user hasn't specified a power management mode yet, default
9489 * to BATTERY */
9490 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9491 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9492 else
9493 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9494 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9495 if (err) {
9496 IPW_DEBUG_WX("failed setting power mode.\n");
9497 mutex_unlock(&priv->mutex);
9498 return err;
9501 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9502 mutex_unlock(&priv->mutex);
9503 return 0;
9506 static int ipw_wx_get_power(struct net_device *dev,
9507 struct iw_request_info *info,
9508 union iwreq_data *wrqu, char *extra)
9510 struct ipw_priv *priv = ieee80211_priv(dev);
9511 mutex_lock(&priv->mutex);
9512 if (!(priv->power_mode & IPW_POWER_ENABLED))
9513 wrqu->power.disabled = 1;
9514 else
9515 wrqu->power.disabled = 0;
9517 mutex_unlock(&priv->mutex);
9518 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9520 return 0;
9523 static int ipw_wx_set_powermode(struct net_device *dev,
9524 struct iw_request_info *info,
9525 union iwreq_data *wrqu, char *extra)
9527 struct ipw_priv *priv = ieee80211_priv(dev);
9528 int mode = *(int *)extra;
9529 int err;
9530 mutex_lock(&priv->mutex);
9531 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9532 mode = IPW_POWER_AC;
9533 priv->power_mode = mode;
9534 } else {
9535 priv->power_mode = IPW_POWER_ENABLED | mode;
9538 if (priv->power_mode != mode) {
9539 err = ipw_send_power_mode(priv, mode);
9541 if (err) {
9542 IPW_DEBUG_WX("failed setting power mode.\n");
9543 mutex_unlock(&priv->mutex);
9544 return err;
9547 mutex_unlock(&priv->mutex);
9548 return 0;
9551 #define MAX_WX_STRING 80
9552 static int ipw_wx_get_powermode(struct net_device *dev,
9553 struct iw_request_info *info,
9554 union iwreq_data *wrqu, char *extra)
9556 struct ipw_priv *priv = ieee80211_priv(dev);
9557 int level = IPW_POWER_LEVEL(priv->power_mode);
9558 char *p = extra;
9560 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9562 switch (level) {
9563 case IPW_POWER_AC:
9564 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9565 break;
9566 case IPW_POWER_BATTERY:
9567 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9568 break;
9569 default:
9570 p += snprintf(p, MAX_WX_STRING - (p - extra),
9571 "(Timeout %dms, Period %dms)",
9572 timeout_duration[level - 1] / 1000,
9573 period_duration[level - 1] / 1000);
9576 if (!(priv->power_mode & IPW_POWER_ENABLED))
9577 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9579 wrqu->data.length = p - extra + 1;
9581 return 0;
9584 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9585 struct iw_request_info *info,
9586 union iwreq_data *wrqu, char *extra)
9588 struct ipw_priv *priv = ieee80211_priv(dev);
9589 int mode = *(int *)extra;
9590 u8 band = 0, modulation = 0;
9592 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9593 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9594 return -EINVAL;
9596 mutex_lock(&priv->mutex);
9597 if (priv->adapter == IPW_2915ABG) {
9598 priv->ieee->abg_true = 1;
9599 if (mode & IEEE_A) {
9600 band |= IEEE80211_52GHZ_BAND;
9601 modulation |= IEEE80211_OFDM_MODULATION;
9602 } else
9603 priv->ieee->abg_true = 0;
9604 } else {
9605 if (mode & IEEE_A) {
9606 IPW_WARNING("Attempt to set 2200BG into "
9607 "802.11a mode\n");
9608 mutex_unlock(&priv->mutex);
9609 return -EINVAL;
9612 priv->ieee->abg_true = 0;
9615 if (mode & IEEE_B) {
9616 band |= IEEE80211_24GHZ_BAND;
9617 modulation |= IEEE80211_CCK_MODULATION;
9618 } else
9619 priv->ieee->abg_true = 0;
9621 if (mode & IEEE_G) {
9622 band |= IEEE80211_24GHZ_BAND;
9623 modulation |= IEEE80211_OFDM_MODULATION;
9624 } else
9625 priv->ieee->abg_true = 0;
9627 priv->ieee->mode = mode;
9628 priv->ieee->freq_band = band;
9629 priv->ieee->modulation = modulation;
9630 init_supported_rates(priv, &priv->rates);
9632 /* Network configuration changed -- force [re]association */
9633 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9634 if (!ipw_disassociate(priv)) {
9635 ipw_send_supported_rates(priv, &priv->rates);
9636 ipw_associate(priv);
9639 /* Update the band LEDs */
9640 ipw_led_band_on(priv);
9642 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9643 mode & IEEE_A ? 'a' : '.',
9644 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9645 mutex_unlock(&priv->mutex);
9646 return 0;
9649 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9650 struct iw_request_info *info,
9651 union iwreq_data *wrqu, char *extra)
9653 struct ipw_priv *priv = ieee80211_priv(dev);
9654 mutex_lock(&priv->mutex);
9655 switch (priv->ieee->mode) {
9656 case IEEE_A:
9657 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9658 break;
9659 case IEEE_B:
9660 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9661 break;
9662 case IEEE_A | IEEE_B:
9663 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9664 break;
9665 case IEEE_G:
9666 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9667 break;
9668 case IEEE_A | IEEE_G:
9669 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9670 break;
9671 case IEEE_B | IEEE_G:
9672 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9673 break;
9674 case IEEE_A | IEEE_B | IEEE_G:
9675 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9676 break;
9677 default:
9678 strncpy(extra, "unknown", MAX_WX_STRING);
9679 break;
9682 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9684 wrqu->data.length = strlen(extra) + 1;
9685 mutex_unlock(&priv->mutex);
9687 return 0;
9690 static int ipw_wx_set_preamble(struct net_device *dev,
9691 struct iw_request_info *info,
9692 union iwreq_data *wrqu, char *extra)
9694 struct ipw_priv *priv = ieee80211_priv(dev);
9695 int mode = *(int *)extra;
9696 mutex_lock(&priv->mutex);
9697 /* Switching from SHORT -> LONG requires a disassociation */
9698 if (mode == 1) {
9699 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9700 priv->config |= CFG_PREAMBLE_LONG;
9702 /* Network configuration changed -- force [re]association */
9703 IPW_DEBUG_ASSOC
9704 ("[re]association triggered due to preamble change.\n");
9705 if (!ipw_disassociate(priv))
9706 ipw_associate(priv);
9708 goto done;
9711 if (mode == 0) {
9712 priv->config &= ~CFG_PREAMBLE_LONG;
9713 goto done;
9715 mutex_unlock(&priv->mutex);
9716 return -EINVAL;
9718 done:
9719 mutex_unlock(&priv->mutex);
9720 return 0;
9723 static int ipw_wx_get_preamble(struct net_device *dev,
9724 struct iw_request_info *info,
9725 union iwreq_data *wrqu, char *extra)
9727 struct ipw_priv *priv = ieee80211_priv(dev);
9728 mutex_lock(&priv->mutex);
9729 if (priv->config & CFG_PREAMBLE_LONG)
9730 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9731 else
9732 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9733 mutex_unlock(&priv->mutex);
9734 return 0;
9737 #ifdef CONFIG_IPW2200_MONITOR
9738 static int ipw_wx_set_monitor(struct net_device *dev,
9739 struct iw_request_info *info,
9740 union iwreq_data *wrqu, char *extra)
9742 struct ipw_priv *priv = ieee80211_priv(dev);
9743 int *parms = (int *)extra;
9744 int enable = (parms[0] > 0);
9745 mutex_lock(&priv->mutex);
9746 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9747 if (enable) {
9748 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9749 #ifdef CONFIG_IPW2200_RADIOTAP
9750 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9751 #else
9752 priv->net_dev->type = ARPHRD_IEEE80211;
9753 #endif
9754 queue_work(priv->workqueue, &priv->adapter_restart);
9757 ipw_set_channel(priv, parms[1]);
9758 } else {
9759 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9760 mutex_unlock(&priv->mutex);
9761 return 0;
9763 priv->net_dev->type = ARPHRD_ETHER;
9764 queue_work(priv->workqueue, &priv->adapter_restart);
9766 mutex_unlock(&priv->mutex);
9767 return 0;
9770 #endif /* CONFIG_IPW2200_MONITOR */
9772 static int ipw_wx_reset(struct net_device *dev,
9773 struct iw_request_info *info,
9774 union iwreq_data *wrqu, char *extra)
9776 struct ipw_priv *priv = ieee80211_priv(dev);
9777 IPW_DEBUG_WX("RESET\n");
9778 queue_work(priv->workqueue, &priv->adapter_restart);
9779 return 0;
9782 static int ipw_wx_sw_reset(struct net_device *dev,
9783 struct iw_request_info *info,
9784 union iwreq_data *wrqu, char *extra)
9786 struct ipw_priv *priv = ieee80211_priv(dev);
9787 union iwreq_data wrqu_sec = {
9788 .encoding = {
9789 .flags = IW_ENCODE_DISABLED,
9792 int ret;
9794 IPW_DEBUG_WX("SW_RESET\n");
9796 mutex_lock(&priv->mutex);
9798 ret = ipw_sw_reset(priv, 2);
9799 if (!ret) {
9800 free_firmware();
9801 ipw_adapter_restart(priv);
9804 /* The SW reset bit might have been toggled on by the 'disable'
9805 * module parameter, so take appropriate action */
9806 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9808 mutex_unlock(&priv->mutex);
9809 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9810 mutex_lock(&priv->mutex);
9812 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9813 /* Configuration likely changed -- force [re]association */
9814 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9815 "reset.\n");
9816 if (!ipw_disassociate(priv))
9817 ipw_associate(priv);
9820 mutex_unlock(&priv->mutex);
9822 return 0;
9825 /* Rebase the WE IOCTLs to zero for the handler array */
9826 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
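/* e.g. IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT], so
 * each standard ioctl lands at its zero-based slot in the handler array. */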
9827 static iw_handler ipw_wx_handlers[] = {
9828 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9829 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9830 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9831 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9832 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9833 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9834 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9835 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9836 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9837 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9838 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9839 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9840 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9841 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9842 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9843 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9844 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9845 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9846 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9847 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9848 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9849 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9850 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9851 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9852 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9853 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9854 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9855 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9856 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9857 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9858 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9859 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9860 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9861 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9862 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9863 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9864 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9865 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9866 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9867 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9868 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9871 enum {
9872 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9873 IPW_PRIV_GET_POWER,
9874 IPW_PRIV_SET_MODE,
9875 IPW_PRIV_GET_MODE,
9876 IPW_PRIV_SET_PREAMBLE,
9877 IPW_PRIV_GET_PREAMBLE,
9878 IPW_PRIV_RESET,
9879 IPW_PRIV_SW_RESET,
9880 #ifdef CONFIG_IPW2200_MONITOR
9881 IPW_PRIV_SET_MONITOR,
9882 #endif
9885 static struct iw_priv_args ipw_priv_args[] = {
9887 .cmd = IPW_PRIV_SET_POWER,
9888 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9889 .name = "set_power"},
9891 .cmd = IPW_PRIV_GET_POWER,
9892 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9893 .name = "get_power"},
9895 .cmd = IPW_PRIV_SET_MODE,
9896 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9897 .name = "set_mode"},
9899 .cmd = IPW_PRIV_GET_MODE,
9900 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9901 .name = "get_mode"},
9903 .cmd = IPW_PRIV_SET_PREAMBLE,
9904 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9905 .name = "set_preamble"},
9907 .cmd = IPW_PRIV_GET_PREAMBLE,
9908 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9909 .name = "get_preamble"},
9911 IPW_PRIV_RESET,
9912 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9914 IPW_PRIV_SW_RESET,
9915 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9916 #ifdef CONFIG_IPW2200_MONITOR
9918 IPW_PRIV_SET_MONITOR,
9919 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9920 #endif /* CONFIG_IPW2200_MONITOR */
9923 static iw_handler ipw_priv_handler[] = {
9924 ipw_wx_set_powermode,
9925 ipw_wx_get_powermode,
9926 ipw_wx_set_wireless_mode,
9927 ipw_wx_get_wireless_mode,
9928 ipw_wx_set_preamble,
9929 ipw_wx_get_preamble,
9930 ipw_wx_reset,
9931 ipw_wx_sw_reset,
9932 #ifdef CONFIG_IPW2200_MONITOR
9933 ipw_wx_set_monitor,
9934 #endif
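/*
 * Illustrative note, not part of the driver: the Wireless Extensions core
 * indexes the private handler table by (cmd - SIOCIWFIRSTPRIV), so the
 * order of ipw_priv_handler[] above must mirror the enum that starts at
 * IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV.  For example, IPW_PRIV_SET_MODE is
 * SIOCIWFIRSTPRIV + 2 and therefore dispatches to ipw_priv_handler[2],
 * which is ipw_wx_set_wireless_mode.
 */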
9937 static struct iw_handler_def ipw_wx_handler_def = {
9938 .standard = ipw_wx_handlers,
9939 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9940 .num_private = ARRAY_SIZE(ipw_priv_handler),
9941 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9942 .private = ipw_priv_handler,
9943 .private_args = ipw_priv_args,
9944 .get_wireless_stats = ipw_get_wireless_stats,
9948 * Get wireless statistics.
9949 * Called by /proc/net/wireless
9950 * Also called by SIOCGIWSTATS
9952 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9954 struct ipw_priv *priv = ieee80211_priv(dev);
9955 struct iw_statistics *wstats;
9957 wstats = &priv->wstats;
9959 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9960 * netdev->get_wireless_stats seems to be called before fw is
9961 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9962 * and associated; if not associated, the values are all meaningless
9963 * anyway, so zero them all and mark them INVALID */
9964 if (!(priv->status & STATUS_ASSOCIATED)) {
9965 wstats->miss.beacon = 0;
9966 wstats->discard.retries = 0;
9967 wstats->qual.qual = 0;
9968 wstats->qual.level = 0;
9969 wstats->qual.noise = 0;
9970 wstats->qual.updated = 7;
9971 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9972 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
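/* For reference: 7 above is IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
 * IW_QUAL_NOISE_UPDATED in the WE flag encoding, so while unassociated the
 * zeroed values are reported as both updated and invalid. */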
9973 return wstats;
9976 wstats->qual.qual = priv->quality;
9977 wstats->qual.level = priv->exp_avg_rssi;
9978 wstats->qual.noise = priv->exp_avg_noise;
9979 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9980 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9982 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9983 wstats->discard.retries = priv->last_tx_failures;
9984 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9986 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9987 goto fail_get_ordinal;
9988 wstats->discard.retries += tx_retry; */
9990 return wstats;
9993 /* net device stuff */
9995 static void init_sys_config(struct ipw_sys_config *sys_config)
9997 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9998 sys_config->bt_coexistence = 0;
9999 sys_config->answer_broadcast_ssid_probe = 0;
10000 sys_config->accept_all_data_frames = 0;
10001 sys_config->accept_non_directed_frames = 1;
10002 sys_config->exclude_unicast_unencrypted = 0;
10003 sys_config->disable_unicast_decryption = 1;
10004 sys_config->exclude_multicast_unencrypted = 0;
10005 sys_config->disable_multicast_decryption = 1;
10006 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10007 antenna = CFG_SYS_ANTENNA_BOTH;
10008 sys_config->antenna_diversity = antenna;
10009 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10010 sys_config->dot11g_auto_detection = 0;
10011 sys_config->enable_cts_to_self = 0;
10012 sys_config->bt_coexist_collision_thr = 0;
10013 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10014 sys_config->silence_threshold = 0x1e;
10017 static int ipw_net_open(struct net_device *dev)
10019 struct ipw_priv *priv = ieee80211_priv(dev);
10020 IPW_DEBUG_INFO("dev->open\n");
10021 /* we should be verifying the device is ready to be opened */
10022 mutex_lock(&priv->mutex);
10023 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10024 (priv->status & STATUS_ASSOCIATED))
10025 netif_start_queue(dev);
10026 mutex_unlock(&priv->mutex);
10027 return 0;
10030 static int ipw_net_stop(struct net_device *dev)
10032 IPW_DEBUG_INFO("dev->close\n");
10033 netif_stop_queue(dev);
10034 return 0;
10038 TODO:
10040 modify to send one TFD per fragment instead of using chunking; otherwise
10041 we need to heavily modify ieee80211_skb_to_txb().
10044 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10045 int pri)
10047 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10048 txb->fragments[0]->data;
10049 int i = 0;
10050 struct tfd_frame *tfd;
10051 #ifdef CONFIG_IPW2200_QOS
10052 int tx_id = ipw_get_tx_queue_number(priv, pri);
10053 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10054 #else
10055 struct clx2_tx_queue *txq = &priv->txq[0];
10056 #endif
10057 struct clx2_queue *q = &txq->q;
10058 u8 id, hdr_len, unicast;
10059 u16 remaining_bytes;
10060 int fc;
10062 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10063 switch (priv->ieee->iw_mode) {
10064 case IW_MODE_ADHOC:
10065 unicast = !is_multicast_ether_addr(hdr->addr1);
10066 id = ipw_find_station(priv, hdr->addr1);
10067 if (id == IPW_INVALID_STATION) {
10068 id = ipw_add_station(priv, hdr->addr1);
10069 if (id == IPW_INVALID_STATION) {
10070 IPW_WARNING("Attempt to send data to "
10071 "invalid cell: " MAC_FMT "\n",
10072 MAC_ARG(hdr->addr1));
10073 goto drop;
10076 break;
10078 case IW_MODE_INFRA:
10079 default:
10080 unicast = !is_multicast_ether_addr(hdr->addr3);
10081 id = 0;
10082 break;
10085 tfd = &txq->bd[q->first_empty];
10086 txq->txb[q->first_empty] = txb;
10087 memset(tfd, 0, sizeof(*tfd));
10088 tfd->u.data.station_number = id;
10090 tfd->control_flags.message_type = TX_FRAME_TYPE;
10091 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10093 tfd->u.data.cmd_id = DINO_CMD_TX;
10094 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10095 remaining_bytes = txb->payload_size;
10097 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10098 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10099 else
10100 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10102 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10103 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10105 fc = le16_to_cpu(hdr->frame_ctl);
10106 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10108 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10110 if (likely(unicast))
10111 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10113 if (txb->encrypted && !priv->ieee->host_encrypt) {
10114 switch (priv->ieee->sec.level) {
10115 case SEC_LEVEL_3:
10116 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10117 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10118 /* XXX: ACK flag must be set for CCMP even if it
10119 * is a multicast/broadcast packet, because CCMP
10120 * group communication encrypted with the GTK is
10121 * actually handled by the AP. */
10122 if (!unicast)
10123 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10125 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10126 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10127 tfd->u.data.key_index = 0;
10128 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10129 break;
10130 case SEC_LEVEL_2:
10131 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10132 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10133 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10134 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10135 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10136 break;
10137 case SEC_LEVEL_1:
10138 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10139 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10140 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10141 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10143 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10144 else
10145 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10146 break;
10147 case SEC_LEVEL_0:
10148 break;
10149 default:
10150 printk(KERN_ERR "Unknown security level %d\n",
10151 priv->ieee->sec.level);
10152 break;
10154 } else
10155 /* No hardware encryption */
10156 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10158 #ifdef CONFIG_IPW2200_QOS
10159 if (fc & IEEE80211_STYPE_QOS_DATA)
10160 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10161 #endif /* CONFIG_IPW2200_QOS */
10163 /* payload */
10164 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10165 txb->nr_frags));
10166 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10167 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10168 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10169 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10170 i, le32_to_cpu(tfd->u.data.num_chunks),
10171 txb->fragments[i]->len - hdr_len);
10172 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10173 i, tfd->u.data.num_chunks,
10174 txb->fragments[i]->len - hdr_len);
10175 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10176 txb->fragments[i]->len - hdr_len);
10178 tfd->u.data.chunk_ptr[i] =
10179 cpu_to_le32(pci_map_single
10180 (priv->pci_dev,
10181 txb->fragments[i]->data + hdr_len,
10182 txb->fragments[i]->len - hdr_len,
10183 PCI_DMA_TODEVICE));
10184 tfd->u.data.chunk_len[i] =
10185 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10188 if (i != txb->nr_frags) {
10189 struct sk_buff *skb;
10190 u16 remaining_bytes = 0;
10191 int j;
10193 for (j = i; j < txb->nr_frags; j++)
10194 remaining_bytes += txb->fragments[j]->len - hdr_len;
10196 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10197 remaining_bytes);
10198 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10199 if (skb != NULL) {
10200 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10201 for (j = i; j < txb->nr_frags; j++) {
10202 int size = txb->fragments[j]->len - hdr_len;
10204 printk(KERN_INFO "Adding frag %d %d...\n",
10205 j, size);
10206 memcpy(skb_put(skb, size),
10207 txb->fragments[j]->data + hdr_len, size);
10209 dev_kfree_skb_any(txb->fragments[i]);
10210 txb->fragments[i] = skb;
10211 tfd->u.data.chunk_ptr[i] =
10212 cpu_to_le32(pci_map_single
10213 (priv->pci_dev, skb->data,
10214 tfd->u.data.chunk_len[i],
10215 PCI_DMA_TODEVICE));
10217 tfd->u.data.num_chunks =
10218 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10223 /* kick DMA */
10224 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10225 ipw_write32(priv, q->reg_w, q->first_empty);
10227 if (ipw_queue_space(q) < q->high_mark)
10228 netif_stop_queue(priv->net_dev);
10230 return NETDEV_TX_OK;
10232 drop:
10233 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10234 ieee80211_txb_free(txb);
10235 return NETDEV_TX_OK;
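/*
 * A sketch of the chunking scheme above, illustrative only: at most
 * NUM_TFD_CHUNKS - 2 fragments get a DMA chunk of their own; if the txb
 * carries more fragments than that, the leftovers are copied into one
 * freshly allocated skb that occupies a single extra chunk, so the whole
 * frame always fits in one TFD.  In miniature (hypothetical helper, never
 * built):
 */
#if 0
static u32 example_chunks_needed(u8 nr_frags)
{
	u32 n = min((u8)(NUM_TFD_CHUNKS - 2), nr_frags);

	if (n != nr_frags)	/* leftovers coalesced into one extra chunk */
		n++;
	return n;
}
#endif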
10238 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10240 struct ipw_priv *priv = ieee80211_priv(dev);
10241 #ifdef CONFIG_IPW2200_QOS
10242 int tx_id = ipw_get_tx_queue_number(priv, pri);
10243 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10244 #else
10245 struct clx2_tx_queue *txq = &priv->txq[0];
10246 #endif /* CONFIG_IPW2200_QOS */
10248 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10249 return 1;
10251 return 0;
10254 #ifdef CONFIG_IPW2200_PROMISCUOUS
10255 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10256 struct ieee80211_txb *txb)
10258 struct ieee80211_rx_stats dummystats;
10259 struct ieee80211_hdr *hdr;
10260 u8 n;
10261 u16 filter = priv->prom_priv->filter;
10262 int hdr_only = 0;
10264 if (filter & IPW_PROM_NO_TX)
10265 return;
10267 memset(&dummystats, 0, sizeof(dummystats));
10269 /* Filtering of fragment chains is done against the first fragment */
10270 hdr = (void *)txb->fragments[0]->data;
10271 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10272 if (filter & IPW_PROM_NO_MGMT)
10273 return;
10274 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10275 hdr_only = 1;
10276 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10277 if (filter & IPW_PROM_NO_CTL)
10278 return;
10279 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10280 hdr_only = 1;
10281 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10282 if (filter & IPW_PROM_NO_DATA)
10283 return;
10284 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10285 hdr_only = 1;
10288 for(n=0; n<txb->nr_frags; ++n) {
10289 struct sk_buff *src = txb->fragments[n];
10290 struct sk_buff *dst;
10291 struct ieee80211_radiotap_header *rt_hdr;
10292 int len;
10294 if (hdr_only) {
10295 hdr = (void *)src->data;
10296 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10297 } else
10298 len = src->len;
10300 dst = alloc_skb(
10301 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10302 if (!dst) continue;
10304 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10306 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10307 rt_hdr->it_pad = 0;
10308 rt_hdr->it_present = 0; /* after all, it's just an idea */
10309 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10311 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10312 ieee80211chan2mhz(priv->channel));
10313 if (priv->channel > 14) /* 802.11a */
10314 *(u16*)skb_put(dst, sizeof(u16)) =
10315 cpu_to_le16(IEEE80211_CHAN_OFDM |
10316 IEEE80211_CHAN_5GHZ);
10317 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10318 *(u16*)skb_put(dst, sizeof(u16)) =
10319 cpu_to_le16(IEEE80211_CHAN_CCK |
10320 IEEE80211_CHAN_2GHZ);
10321 else /* 802.11g */
10322 *(u16*)skb_put(dst, sizeof(u16)) =
10323 cpu_to_le16(IEEE80211_CHAN_OFDM |
10324 IEEE80211_CHAN_2GHZ);
10326 rt_hdr->it_len = dst->len;
10328 memcpy(skb_put(dst, len), src->data, len);
10330 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10331 dev_kfree_skb_any(dst);
10334 #endif
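/*
 * For reference, illustrative only: each frame handed to the rtap device
 * above is prefixed with a minimal radiotap header -- the 8-byte fixed part
 * (version, pad, length, present bitmap) with only the CHANNEL field marked
 * present, followed by a 2-byte channel frequency and a 2-byte channel
 * flags word, and then the (possibly header-only) 802.11 frame itself.
 */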
10336 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10337 struct net_device *dev, int pri)
10339 struct ipw_priv *priv = ieee80211_priv(dev);
10340 unsigned long flags;
10341 int ret;
10343 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10344 spin_lock_irqsave(&priv->lock, flags);
10346 if (!(priv->status & STATUS_ASSOCIATED)) {
10347 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10348 priv->ieee->stats.tx_carrier_errors++;
10349 netif_stop_queue(dev);
10350 goto fail_unlock;
10353 #ifdef CONFIG_IPW2200_PROMISCUOUS
10354 if (rtap_iface && netif_running(priv->prom_net_dev))
10355 ipw_handle_promiscuous_tx(priv, txb);
10356 #endif
10358 ret = ipw_tx_skb(priv, txb, pri);
10359 if (ret == NETDEV_TX_OK)
10360 __ipw_led_activity_on(priv);
10361 spin_unlock_irqrestore(&priv->lock, flags);
10363 return ret;
10365 fail_unlock:
10366 spin_unlock_irqrestore(&priv->lock, flags);
10367 return 1;
10370 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10372 struct ipw_priv *priv = ieee80211_priv(dev);
10374 priv->ieee->stats.tx_packets = priv->tx_packets;
10375 priv->ieee->stats.rx_packets = priv->rx_packets;
10376 return &priv->ieee->stats;
10379 static void ipw_net_set_multicast_list(struct net_device *dev)
10384 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10386 struct ipw_priv *priv = ieee80211_priv(dev);
10387 struct sockaddr *addr = p;
10388 if (!is_valid_ether_addr(addr->sa_data))
10389 return -EADDRNOTAVAIL;
10390 mutex_lock(&priv->mutex);
10391 priv->config |= CFG_CUSTOM_MAC;
10392 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10393 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10394 priv->net_dev->name, MAC_ARG(priv->mac_addr));
10395 queue_work(priv->workqueue, &priv->adapter_restart);
10396 mutex_unlock(&priv->mutex);
10397 return 0;
10400 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10401 struct ethtool_drvinfo *info)
10403 struct ipw_priv *p = ieee80211_priv(dev);
10404 char vers[64];
10405 char date[32];
10406 u32 len;
10408 strcpy(info->driver, DRV_NAME);
10409 strcpy(info->version, DRV_VERSION);
10411 len = sizeof(vers);
10412 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10413 len = sizeof(date);
10414 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10416 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10417 vers, date);
10418 strcpy(info->bus_info, pci_name(p->pci_dev));
10419 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10422 static u32 ipw_ethtool_get_link(struct net_device *dev)
10424 struct ipw_priv *priv = ieee80211_priv(dev);
10425 return (priv->status & STATUS_ASSOCIATED) != 0;
10428 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10430 return IPW_EEPROM_IMAGE_SIZE;
10433 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10434 struct ethtool_eeprom *eeprom, u8 * bytes)
10436 struct ipw_priv *p = ieee80211_priv(dev);
10438 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10439 return -EINVAL;
10440 mutex_lock(&p->mutex);
10441 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10442 mutex_unlock(&p->mutex);
10443 return 0;
10446 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10447 struct ethtool_eeprom *eeprom, u8 * bytes)
10449 struct ipw_priv *p = ieee80211_priv(dev);
10450 int i;
10452 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10453 return -EINVAL;
10454 mutex_lock(&p->mutex);
10455 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10456 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10457 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
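/* Note, for reference: even a partial ethtool EEPROM write pushes the
 * whole cached image back to the NIC -- the loop above walks all of
 * IPW_EEPROM_IMAGE_SIZE, not just the bytes that changed. */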
10458 mutex_unlock(&p->mutex);
10459 return 0;
10462 static const struct ethtool_ops ipw_ethtool_ops = {
10463 .get_link = ipw_ethtool_get_link,
10464 .get_drvinfo = ipw_ethtool_get_drvinfo,
10465 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10466 .get_eeprom = ipw_ethtool_get_eeprom,
10467 .set_eeprom = ipw_ethtool_set_eeprom,
10470 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10472 struct ipw_priv *priv = data;
10473 u32 inta, inta_mask;
10475 if (!priv)
10476 return IRQ_NONE;
10478 spin_lock(&priv->irq_lock);
10480 if (!(priv->status & STATUS_INT_ENABLED)) {
10481 /* Shared IRQ */
10482 goto none;
10485 inta = ipw_read32(priv, IPW_INTA_RW);
10486 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10488 if (inta == 0xFFFFFFFF) {
10489 /* Hardware disappeared */
10490 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10491 goto none;
10494 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10495 /* Shared interrupt */
10496 goto none;
10499 /* tell the device to stop sending interrupts */
10500 __ipw_disable_interrupts(priv);
10502 /* ack current interrupts */
10503 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10504 ipw_write32(priv, IPW_INTA_RW, inta);
10506 /* Cache INTA value for our tasklet */
10507 priv->isr_inta = inta;
10509 tasklet_schedule(&priv->irq_tasklet);
10511 spin_unlock(&priv->irq_lock);
10513 return IRQ_HANDLED;
10514 none:
10515 spin_unlock(&priv->irq_lock);
10516 return IRQ_NONE;
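/*
 * For reference, illustrative only: the handler above follows the usual
 * shared-interrupt pattern -- return IRQ_NONE early when our interrupts are
 * masked, when INTA reads back as 0xFFFFFFFF (card gone), or when none of
 * our bits are set; otherwise mask further interrupts, ack and cache the
 * INTA bits, and defer the real work to priv->irq_tasklet.
 */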
10519 static void ipw_rf_kill(void *adapter)
10521 struct ipw_priv *priv = adapter;
10522 unsigned long flags;
10524 spin_lock_irqsave(&priv->lock, flags);
10526 if (rf_kill_active(priv)) {
10527 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10528 if (priv->workqueue)
10529 queue_delayed_work(priv->workqueue,
10530 &priv->rf_kill, 2 * HZ);
10531 goto exit_unlock;
10534 /* RF Kill is now disabled, so bring the device back up */
10536 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10537 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10538 "device\n");
10540 /* we can not do an adapter restart while inside an irq lock */
10541 queue_work(priv->workqueue, &priv->adapter_restart);
10542 } else
10543 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10544 "enabled\n");
10546 exit_unlock:
10547 spin_unlock_irqrestore(&priv->lock, flags);
10550 static void ipw_bg_rf_kill(void *data)
10552 struct ipw_priv *priv = data;
10553 mutex_lock(&priv->mutex);
10554 ipw_rf_kill(data);
10555 mutex_unlock(&priv->mutex);
10558 static void ipw_link_up(struct ipw_priv *priv)
10560 priv->last_seq_num = -1;
10561 priv->last_frag_num = -1;
10562 priv->last_packet_time = 0;
10564 netif_carrier_on(priv->net_dev);
10565 if (netif_queue_stopped(priv->net_dev)) {
10566 IPW_DEBUG_NOTIF("waking queue\n");
10567 netif_wake_queue(priv->net_dev);
10568 } else {
10569 IPW_DEBUG_NOTIF("starting queue\n");
10570 netif_start_queue(priv->net_dev);
10573 cancel_delayed_work(&priv->request_scan);
10574 ipw_reset_stats(priv);
10575 /* Ensure the rate is updated immediately */
10576 priv->last_rate = ipw_get_current_rate(priv);
10577 ipw_gather_stats(priv);
10578 ipw_led_link_up(priv);
10579 notify_wx_assoc_event(priv);
10581 if (priv->config & CFG_BACKGROUND_SCAN)
10582 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10585 static void ipw_bg_link_up(void *data)
10587 struct ipw_priv *priv = data;
10588 mutex_lock(&priv->mutex);
10589 ipw_link_up(data);
10590 mutex_unlock(&priv->mutex);
10593 static void ipw_link_down(struct ipw_priv *priv)
10595 ipw_led_link_down(priv);
10596 netif_carrier_off(priv->net_dev);
10597 netif_stop_queue(priv->net_dev);
10598 notify_wx_assoc_event(priv);
10600 /* Cancel any queued work ... */
10601 cancel_delayed_work(&priv->request_scan);
10602 cancel_delayed_work(&priv->adhoc_check);
10603 cancel_delayed_work(&priv->gather_stats);
10605 ipw_reset_stats(priv);
10607 if (!(priv->status & STATUS_EXIT_PENDING)) {
10608 /* Queue up another scan... */
10609 queue_work(priv->workqueue, &priv->request_scan);
10613 static void ipw_bg_link_down(void *data)
10615 struct ipw_priv *priv = data;
10616 mutex_lock(&priv->mutex);
10617 ipw_link_down(data);
10618 mutex_unlock(&priv->mutex);
10621 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10623 int ret = 0;
10625 priv->workqueue = create_workqueue(DRV_NAME);
10626 init_waitqueue_head(&priv->wait_command_queue);
10627 init_waitqueue_head(&priv->wait_state);
10629 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10630 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10631 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10632 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10633 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10634 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10635 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10636 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10637 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10638 INIT_WORK(&priv->request_scan,
10639 (void (*)(void *))ipw_request_scan, priv);
10640 INIT_WORK(&priv->request_passive_scan,
10641 (void (*)(void *))ipw_request_passive_scan, priv);
10642 INIT_WORK(&priv->gather_stats,
10643 (void (*)(void *))ipw_bg_gather_stats, priv);
10644 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10645 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10646 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10647 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10648 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10649 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10650 priv);
10651 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10652 priv);
10653 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10654 priv);
10655 INIT_WORK(&priv->merge_networks,
10656 (void (*)(void *))ipw_merge_adhoc_network, priv);
10658 #ifdef CONFIG_IPW2200_QOS
10659 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10660 priv);
10661 #endif /* CONFIG_IPW2200_QOS */
10663 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10664 ipw_irq_tasklet, (unsigned long)priv);
10666 return ret;
10669 static void shim__set_security(struct net_device *dev,
10670 struct ieee80211_security *sec)
10672 struct ipw_priv *priv = ieee80211_priv(dev);
10673 int i;
10674 for (i = 0; i < 4; i++) {
10675 if (sec->flags & (1 << i)) {
10676 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10677 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10678 if (sec->key_sizes[i] == 0)
10679 priv->ieee->sec.flags &= ~(1 << i);
10680 else {
10681 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10682 sec->key_sizes[i]);
10683 priv->ieee->sec.flags |= (1 << i);
10685 priv->status |= STATUS_SECURITY_UPDATED;
10686 } else if (sec->level != SEC_LEVEL_1)
10687 priv->ieee->sec.flags &= ~(1 << i);
10690 if (sec->flags & SEC_ACTIVE_KEY) {
10691 if (sec->active_key <= 3) {
10692 priv->ieee->sec.active_key = sec->active_key;
10693 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10694 } else
10695 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10696 priv->status |= STATUS_SECURITY_UPDATED;
10697 } else
10698 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10700 if ((sec->flags & SEC_AUTH_MODE) &&
10701 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10702 priv->ieee->sec.auth_mode = sec->auth_mode;
10703 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10704 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10705 priv->capability |= CAP_SHARED_KEY;
10706 else
10707 priv->capability &= ~CAP_SHARED_KEY;
10708 priv->status |= STATUS_SECURITY_UPDATED;
10711 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10712 priv->ieee->sec.flags |= SEC_ENABLED;
10713 priv->ieee->sec.enabled = sec->enabled;
10714 priv->status |= STATUS_SECURITY_UPDATED;
10715 if (sec->enabled)
10716 priv->capability |= CAP_PRIVACY_ON;
10717 else
10718 priv->capability &= ~CAP_PRIVACY_ON;
10721 if (sec->flags & SEC_ENCRYPT)
10722 priv->ieee->sec.encrypt = sec->encrypt;
10724 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10725 priv->ieee->sec.level = sec->level;
10726 priv->ieee->sec.flags |= SEC_LEVEL;
10727 priv->status |= STATUS_SECURITY_UPDATED;
10730 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10731 ipw_set_hwcrypto_keys(priv);
10733 /* To match the current functionality of ipw2100 (which works well
10734 * with various supplicants), we don't force a disassociation if the
10735 * privacy capability changes ... */
10736 #if 0
10737 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10738 (((priv->assoc_request.capability &
10739 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10740 (!(priv->assoc_request.capability &
10741 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10742 IPW_DEBUG_ASSOC("Disassociating due to capability "
10743 "change.\n");
10744 ipw_disassociate(priv);
10746 #endif
10749 static int init_supported_rates(struct ipw_priv *priv,
10750 struct ipw_supported_rates *rates)
10752 /* TODO: Mask out rates based on priv->rates_mask */
10754 memset(rates, 0, sizeof(*rates));
10755 /* configure supported rates */
10756 switch (priv->ieee->freq_band) {
10757 case IEEE80211_52GHZ_BAND:
10758 rates->ieee_mode = IPW_A_MODE;
10759 rates->purpose = IPW_RATE_CAPABILITIES;
10760 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10761 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10762 break;
10764 default: /* Mixed or 2.4Ghz */
10765 rates->ieee_mode = IPW_G_MODE;
10766 rates->purpose = IPW_RATE_CAPABILITIES;
10767 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10768 IEEE80211_CCK_DEFAULT_RATES_MASK);
10769 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10770 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10771 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10773 break;
10776 return 0;
10779 static int ipw_config(struct ipw_priv *priv)
10781 /* This is only called from ipw_up, which resets/reloads the firmware,
10782 so we don't need to disable the card first before we configure
10783 it */
10784 if (ipw_set_tx_power(priv))
10785 goto error;
10787 /* initialize adapter address */
10788 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10789 goto error;
10791 /* set basic system config settings */
10792 init_sys_config(&priv->sys_config);
10794 /* Support Bluetooth if we have BT h/w on board and the user wants to.
10795 * Does not support BT priority yet (don't abort or defer our Tx) */
10796 if (bt_coexist) {
10797 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10799 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10800 priv->sys_config.bt_coexistence
10801 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10802 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10803 priv->sys_config.bt_coexistence
10804 |= CFG_BT_COEXISTENCE_OOB;
10807 #ifdef CONFIG_IPW2200_PROMISCUOUS
10808 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10809 priv->sys_config.accept_all_data_frames = 1;
10810 priv->sys_config.accept_non_directed_frames = 1;
10811 priv->sys_config.accept_all_mgmt_bcpr = 1;
10812 priv->sys_config.accept_all_mgmt_frames = 1;
10814 #endif
10816 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10817 priv->sys_config.answer_broadcast_ssid_probe = 1;
10818 else
10819 priv->sys_config.answer_broadcast_ssid_probe = 0;
10821 if (ipw_send_system_config(priv))
10822 goto error;
10824 init_supported_rates(priv, &priv->rates);
10825 if (ipw_send_supported_rates(priv, &priv->rates))
10826 goto error;
10828 /* Set request-to-send threshold */
10829 if (priv->rts_threshold) {
10830 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10831 goto error;
10833 #ifdef CONFIG_IPW2200_QOS
10834 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10835 ipw_qos_activate(priv, NULL);
10836 #endif /* CONFIG_IPW2200_QOS */
10838 if (ipw_set_random_seed(priv))
10839 goto error;
10841 /* final state transition to the RUN state */
10842 if (ipw_send_host_complete(priv))
10843 goto error;
10845 priv->status |= STATUS_INIT;
10847 ipw_led_init(priv);
10848 ipw_led_radio_on(priv);
10849 priv->notif_missed_beacons = 0;
10851 /* Set hardware WEP key if it is configured. */
10852 if ((priv->capability & CAP_PRIVACY_ON) &&
10853 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10854 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10855 ipw_set_hwcrypto_keys(priv);
10857 return 0;
10859 error:
10860 return -EIO;
10864 * NOTE:
10866 * These tables have been tested in conjunction with the
10867 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10869 * Altering these values, using them on other hardware, or using them
10870 * in geographies not intended for resale of the above-mentioned Intel
10871 * adapters has not been tested.
10873 * Remember to update the table in README.ipw2200 when changing this
10874 * table.
10877 static const struct ieee80211_geo ipw_geos[] = {
10878 { /* Restricted */
10879 "---",
10880 .bg_channels = 11,
10881 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10882 {2427, 4}, {2432, 5}, {2437, 6},
10883 {2442, 7}, {2447, 8}, {2452, 9},
10884 {2457, 10}, {2462, 11}},
10887 { /* Custom US/Canada */
10888 "ZZF",
10889 .bg_channels = 11,
10890 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10891 {2427, 4}, {2432, 5}, {2437, 6},
10892 {2442, 7}, {2447, 8}, {2452, 9},
10893 {2457, 10}, {2462, 11}},
10894 .a_channels = 8,
10895 .a = {{5180, 36},
10896 {5200, 40},
10897 {5220, 44},
10898 {5240, 48},
10899 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10900 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10901 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10902 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10905 { /* Rest of World */
10906 "ZZD",
10907 .bg_channels = 13,
10908 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10909 {2427, 4}, {2432, 5}, {2437, 6},
10910 {2442, 7}, {2447, 8}, {2452, 9},
10911 {2457, 10}, {2462, 11}, {2467, 12},
10912 {2472, 13}},
10915 { /* Custom USA & Europe & High */
10916 "ZZA",
10917 .bg_channels = 11,
10918 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10919 {2427, 4}, {2432, 5}, {2437, 6},
10920 {2442, 7}, {2447, 8}, {2452, 9},
10921 {2457, 10}, {2462, 11}},
10922 .a_channels = 13,
10923 .a = {{5180, 36},
10924 {5200, 40},
10925 {5220, 44},
10926 {5240, 48},
10927 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10928 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10929 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10930 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10931 {5745, 149},
10932 {5765, 153},
10933 {5785, 157},
10934 {5805, 161},
10935 {5825, 165}},
10938 { /* Custom NA & Europe */
10939 "ZZB",
10940 .bg_channels = 11,
10941 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10942 {2427, 4}, {2432, 5}, {2437, 6},
10943 {2442, 7}, {2447, 8}, {2452, 9},
10944 {2457, 10}, {2462, 11}},
10945 .a_channels = 13,
10946 .a = {{5180, 36},
10947 {5200, 40},
10948 {5220, 44},
10949 {5240, 48},
10950 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10951 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10952 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10953 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10954 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10955 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10956 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10957 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10958 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10961 { /* Custom Japan */
10962 "ZZC",
10963 .bg_channels = 11,
10964 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10965 {2427, 4}, {2432, 5}, {2437, 6},
10966 {2442, 7}, {2447, 8}, {2452, 9},
10967 {2457, 10}, {2462, 11}},
10968 .a_channels = 4,
10969 .a = {{5170, 34}, {5190, 38},
10970 {5210, 42}, {5230, 46}},
10973 { /* Custom */
10974 "ZZM",
10975 .bg_channels = 11,
10976 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10977 {2427, 4}, {2432, 5}, {2437, 6},
10978 {2442, 7}, {2447, 8}, {2452, 9},
10979 {2457, 10}, {2462, 11}},
10982 { /* Europe */
10983 "ZZE",
10984 .bg_channels = 13,
10985 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10986 {2427, 4}, {2432, 5}, {2437, 6},
10987 {2442, 7}, {2447, 8}, {2452, 9},
10988 {2457, 10}, {2462, 11}, {2467, 12},
10989 {2472, 13}},
10990 .a_channels = 19,
10991 .a = {{5180, 36},
10992 {5200, 40},
10993 {5220, 44},
10994 {5240, 48},
10995 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10996 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10997 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10998 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10999 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11000 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11001 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11002 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11003 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11004 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11005 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11006 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11007 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11008 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11009 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11012 { /* Custom Japan */
11013 "ZZJ",
11014 .bg_channels = 14,
11015 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11016 {2427, 4}, {2432, 5}, {2437, 6},
11017 {2442, 7}, {2447, 8}, {2452, 9},
11018 {2457, 10}, {2462, 11}, {2467, 12},
11019 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11020 .a_channels = 4,
11021 .a = {{5170, 34}, {5190, 38},
11022 {5210, 42}, {5230, 46}},
11025 { /* Rest of World */
11026 "ZZR",
11027 .bg_channels = 14,
11028 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11029 {2427, 4}, {2432, 5}, {2437, 6},
11030 {2442, 7}, {2447, 8}, {2452, 9},
11031 {2457, 10}, {2462, 11}, {2467, 12},
11032 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11033 IEEE80211_CH_PASSIVE_ONLY}},
11036 { /* High Band */
11037 "ZZH",
11038 .bg_channels = 13,
11039 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11040 {2427, 4}, {2432, 5}, {2437, 6},
11041 {2442, 7}, {2447, 8}, {2452, 9},
11042 {2457, 10}, {2462, 11},
11043 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11044 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11045 .a_channels = 4,
11046 .a = {{5745, 149}, {5765, 153},
11047 {5785, 157}, {5805, 161}},
11050 { /* Custom Europe */
11051 "ZZG",
11052 .bg_channels = 13,
11053 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11054 {2427, 4}, {2432, 5}, {2437, 6},
11055 {2442, 7}, {2447, 8}, {2452, 9},
11056 {2457, 10}, {2462, 11},
11057 {2467, 12}, {2472, 13}},
11058 .a_channels = 4,
11059 .a = {{5180, 36}, {5200, 40},
11060 {5220, 44}, {5240, 48}},
11063 { /* Europe */
11064 "ZZK",
11065 .bg_channels = 13,
11066 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11067 {2427, 4}, {2432, 5}, {2437, 6},
11068 {2442, 7}, {2447, 8}, {2452, 9},
11069 {2457, 10}, {2462, 11},
11070 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11071 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11072 .a_channels = 24,
11073 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11074 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11075 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11076 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11077 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11078 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11079 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11080 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11081 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11082 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11083 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11084 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11085 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11086 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11087 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11088 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11089 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11090 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11091 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11092 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11093 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11094 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11095 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11096 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11099 { /* Europe */
11100 "ZZL",
11101 .bg_channels = 11,
11102 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11103 {2427, 4}, {2432, 5}, {2437, 6},
11104 {2442, 7}, {2447, 8}, {2452, 9},
11105 {2457, 10}, {2462, 11}},
11106 .a_channels = 13,
11107 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11108 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11109 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11110 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11111 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11112 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11113 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11114 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11115 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11116 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11117 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11118 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11119 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11123 #define MAX_HW_RESTARTS 5
11124 static int ipw_up(struct ipw_priv *priv)
11126 int rc, i, j;
11128 if (priv->status & STATUS_EXIT_PENDING)
11129 return -EIO;
11131 if (cmdlog && !priv->cmdlog) {
11132 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
11133 GFP_KERNEL);
11134 if (priv->cmdlog == NULL) {
11135 IPW_ERROR("Error allocating %d command log entries.\n",
11136 cmdlog);
11137 return -ENOMEM;
11138 } else {
11139 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
11140 priv->cmdlog_len = cmdlog;
11144 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11145 /* Load the microcode, firmware, and eeprom.
11146 * Also start the clocks. */
11147 rc = ipw_load(priv);
11148 if (rc) {
11149 IPW_ERROR("Unable to load firmware: %d\n", rc);
11150 return rc;
11153 ipw_init_ordinals(priv);
11154 if (!(priv->config & CFG_CUSTOM_MAC))
11155 eeprom_parse_mac(priv, priv->mac_addr);
11156 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11158 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11159 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11160 ipw_geos[j].name, 3))
11161 break;
11163 if (j == ARRAY_SIZE(ipw_geos)) {
11164 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11165 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11166 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11167 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11168 j = 0;
11170 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11171 IPW_WARNING("Could not set geography.");
11172 return 0;
11175 if (priv->status & STATUS_RF_KILL_SW) {
11176 IPW_WARNING("Radio disabled by module parameter.\n");
11177 return 0;
11178 } else if (rf_kill_active(priv)) {
11179 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11180 "Kill switch must be turned off for "
11181 "wireless networking to work.\n");
11182 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11183 2 * HZ);
11184 return 0;
11187 rc = ipw_config(priv);
11188 if (!rc) {
11189 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11191 /* If configured to try and auto-associate, kick
11192 * off a scan. */
11193 queue_work(priv->workqueue, &priv->request_scan);
11195 return 0;
11198 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11199 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11200 i, MAX_HW_RESTARTS);
11202 /* We had an error bringing up the hardware, so take it
11203 * all the way back down so we can try again */
11204 ipw_down(priv);
11207 /* We tried to restart and configure the device for as long as our
11208 * patience could withstand */
11209 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11211 return -EIO;
11214 static void ipw_bg_up(void *data)
11216 struct ipw_priv *priv = data;
11217 mutex_lock(&priv->mutex);
11218 ipw_up(data);
11219 mutex_unlock(&priv->mutex);
11222 static void ipw_deinit(struct ipw_priv *priv)
11224 int i;
11226 if (priv->status & STATUS_SCANNING) {
11227 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11228 ipw_abort_scan(priv);
11231 if (priv->status & STATUS_ASSOCIATED) {
11232 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11233 ipw_disassociate(priv);
11236 ipw_led_shutdown(priv);
11238 /* Wait (up to 1000 iterations of udelay(10)) for the status to change
11239 * to not scanning and not associated (disassociation can take a while
11240 * for a full 802.11 exchange) */
11241 for (i = 1000; i && (priv->status &
11242 (STATUS_DISASSOCIATING |
11243 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11244 udelay(10);
11246 if (priv->status & (STATUS_DISASSOCIATING |
11247 STATUS_ASSOCIATED | STATUS_SCANNING))
11248 IPW_DEBUG_INFO("Still associated or scanning...\n");
11249 else
11250 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11252 /* Attempt to disable the card */
11253 ipw_send_card_disable(priv, 0);
11255 priv->status &= ~STATUS_INIT;
11258 static void ipw_down(struct ipw_priv *priv)
11260 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11262 priv->status |= STATUS_EXIT_PENDING;
11264 if (ipw_is_init(priv))
11265 ipw_deinit(priv);
11267 /* Wipe out the EXIT_PENDING status bit if we are not actually
11268 * exiting the module */
11269 if (!exit_pending)
11270 priv->status &= ~STATUS_EXIT_PENDING;
11272 /* tell the device to stop sending interrupts */
11273 ipw_disable_interrupts(priv);
11275 /* Clear all bits but the RF Kill */
11276 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11277 netif_carrier_off(priv->net_dev);
11278 netif_stop_queue(priv->net_dev);
11280 ipw_stop_nic(priv);
11282 ipw_led_radio_off(priv);
11285 static void ipw_bg_down(void *data)
11287 struct ipw_priv *priv = data;
11288 mutex_lock(&priv->mutex);
11289 ipw_down(data);
11290 mutex_unlock(&priv->mutex);
11293 /* Called by register_netdev() */
11294 static int ipw_net_init(struct net_device *dev)
11296 struct ipw_priv *priv = ieee80211_priv(dev);
11297 mutex_lock(&priv->mutex);
11299 if (ipw_up(priv)) {
11300 mutex_unlock(&priv->mutex);
11301 return -EIO;
11304 mutex_unlock(&priv->mutex);
11305 return 0;
11308 /* PCI driver stuff */
11309 static struct pci_device_id card_ids[] = {
11310 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11311 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11312 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11313 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11314 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11315 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11316 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11317 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11318 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11319 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11320 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11321 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11322 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11323 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11324 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11325 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11326 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11327 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11328 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11329 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11330 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11331 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11333 /* required last entry */
11334 {0,}
11337 MODULE_DEVICE_TABLE(pci, card_ids);
11339 static struct attribute *ipw_sysfs_entries[] = {
11340 &dev_attr_rf_kill.attr,
11341 &dev_attr_direct_dword.attr,
11342 &dev_attr_indirect_byte.attr,
11343 &dev_attr_indirect_dword.attr,
11344 &dev_attr_mem_gpio_reg.attr,
11345 &dev_attr_command_event_reg.attr,
11346 &dev_attr_nic_type.attr,
11347 &dev_attr_status.attr,
11348 &dev_attr_cfg.attr,
11349 &dev_attr_error.attr,
11350 &dev_attr_event_log.attr,
11351 &dev_attr_cmd_log.attr,
11352 &dev_attr_eeprom_delay.attr,
11353 &dev_attr_ucode_version.attr,
11354 &dev_attr_rtc.attr,
11355 &dev_attr_scan_age.attr,
11356 &dev_attr_led.attr,
11357 &dev_attr_speed_scan.attr,
11358 &dev_attr_net_stats.attr,
11359 #ifdef CONFIG_IPW2200_PROMISCUOUS
11360 &dev_attr_rtap_iface.attr,
11361 &dev_attr_rtap_filter.attr,
11362 #endif
11363 NULL
11366 static struct attribute_group ipw_attribute_group = {
11367 .name = NULL, /* put in device directory */
11368 .attrs = ipw_sysfs_entries,
11371 #ifdef CONFIG_IPW2200_PROMISCUOUS
11372 static int ipw_prom_open(struct net_device *dev)
11374 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11375 struct ipw_priv *priv = prom_priv->priv;
11377 IPW_DEBUG_INFO("prom dev->open\n");
11378 netif_carrier_off(dev);
11379 netif_stop_queue(dev);
11381 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11382 priv->sys_config.accept_all_data_frames = 1;
11383 priv->sys_config.accept_non_directed_frames = 1;
11384 priv->sys_config.accept_all_mgmt_bcpr = 1;
11385 priv->sys_config.accept_all_mgmt_frames = 1;
11387 ipw_send_system_config(priv);
11390 return 0;
11393 static int ipw_prom_stop(struct net_device *dev)
11395 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11396 struct ipw_priv *priv = prom_priv->priv;
11398 IPW_DEBUG_INFO("prom dev->stop\n");
11400 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11401 priv->sys_config.accept_all_data_frames = 0;
11402 priv->sys_config.accept_non_directed_frames = 0;
11403 priv->sys_config.accept_all_mgmt_bcpr = 0;
11404 priv->sys_config.accept_all_mgmt_frames = 0;
11406 ipw_send_system_config(priv);
11409 return 0;
11412 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11414 IPW_DEBUG_INFO("prom dev->xmit\n");
11415 netif_stop_queue(dev);
11416 return -EOPNOTSUPP;
11419 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11421 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11422 return &prom_priv->ieee->stats;
11425 static int ipw_prom_alloc(struct ipw_priv *priv)
11427 int rc = 0;
11429 if (priv->prom_net_dev)
11430 return -EPERM;
11432 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11433 if (priv->prom_net_dev == NULL)
11434 return -ENOMEM;
11436 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11437 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11438 priv->prom_priv->priv = priv;
11440 strcpy(priv->prom_net_dev->name, "rtap%d");
11442 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11443 priv->prom_net_dev->open = ipw_prom_open;
11444 priv->prom_net_dev->stop = ipw_prom_stop;
11445 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11446 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11448 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11450 rc = register_netdev(priv->prom_net_dev);
11451 if (rc) {
11452 free_ieee80211(priv->prom_net_dev);
11453 priv->prom_net_dev = NULL;
11454 return rc;
11457 return 0;
11460 static void ipw_prom_free(struct ipw_priv *priv)
11462 if (!priv->prom_net_dev)
11463 return;
11465 unregister_netdev(priv->prom_net_dev);
11466 free_ieee80211(priv->prom_net_dev);
11468 priv->prom_net_dev = NULL;
11471 #endif
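/*
 * Usage sketch, not authoritative: when the module is loaded with
 * rtap_iface=1 (see the module parameter near the end of this file),
 * ipw_prom_alloc() registers an extra "rtap%d" netdevice of type
 * ARPHRD_IEEE80211_RADIOTAP; opening that interface switches the firmware
 * into the accept-everything configuration above, and a sniffer bound to
 * e.g. rtap0 then sees radiotap-tagged copies of the traffic.
 */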
11474 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11476 int err = 0;
11477 struct net_device *net_dev;
11478 void __iomem *base;
11479 u32 length, val;
11480 struct ipw_priv *priv;
11481 int i;
11483 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11484 if (net_dev == NULL) {
11485 err = -ENOMEM;
11486 goto out;
11489 priv = ieee80211_priv(net_dev);
11490 priv->ieee = netdev_priv(net_dev);
11492 priv->net_dev = net_dev;
11493 priv->pci_dev = pdev;
11494 ipw_debug_level = debug;
11495 spin_lock_init(&priv->irq_lock);
11496 spin_lock_init(&priv->lock);
11497 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11498 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11500 mutex_init(&priv->mutex);
11501 if (pci_enable_device(pdev)) {
11502 err = -ENODEV;
11503 goto out_free_ieee80211;
11506 pci_set_master(pdev);
11508 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11509 if (!err)
11510 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11511 if (err) {
11512 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11513 goto out_pci_disable_device;
11516 pci_set_drvdata(pdev, priv);
11518 err = pci_request_regions(pdev, DRV_NAME);
11519 if (err)
11520 goto out_pci_disable_device;
11522 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11523 * PCI Tx retries from interfering with C3 CPU state */
11524 pci_read_config_dword(pdev, 0x40, &val);
11525 if ((val & 0x0000ff00) != 0)
11526 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
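/* For reference: "register 0x41" is the byte at config offset 0x41, i.e.
 * bits 15:8 of the dword read at 0x40; the 0x0000ff00 mask tests it and
 * writing val & 0xffff00ff clears just that byte. */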
11528 length = pci_resource_len(pdev, 0);
11529 priv->hw_len = length;
11531 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11532 if (!base) {
11533 err = -ENODEV;
11534 goto out_pci_release_regions;
11537 priv->hw_base = base;
11538 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11539 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11541 err = ipw_setup_deferred_work(priv);
11542 if (err) {
11543 IPW_ERROR("Unable to setup deferred work\n");
11544 goto out_iounmap;
11547 ipw_sw_reset(priv, 1);
11549 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11550 if (err) {
11551 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11552 goto out_destroy_workqueue;
11555 SET_MODULE_OWNER(net_dev);
11556 SET_NETDEV_DEV(net_dev, &pdev->dev);
11558 mutex_lock(&priv->mutex);
11560 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11561 priv->ieee->set_security = shim__set_security;
11562 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11564 #ifdef CONFIG_IPW2200_QOS
11565 priv->ieee->is_qos_active = ipw_is_qos_active;
11566 priv->ieee->handle_probe_response = ipw_handle_beacon;
11567 priv->ieee->handle_beacon = ipw_handle_probe_response;
11568 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11569 #endif /* CONFIG_IPW2200_QOS */
11571 priv->ieee->perfect_rssi = -20;
11572 priv->ieee->worst_rssi = -85;
11574 net_dev->open = ipw_net_open;
11575 net_dev->stop = ipw_net_stop;
11576 net_dev->init = ipw_net_init;
11577 net_dev->get_stats = ipw_net_get_stats;
11578 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11579 net_dev->set_mac_address = ipw_net_set_mac_address;
11580 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11581 net_dev->wireless_data = &priv->wireless_data;
11582 net_dev->wireless_handlers = &ipw_wx_handler_def;
11583 net_dev->ethtool_ops = &ipw_ethtool_ops;
11584 net_dev->irq = pdev->irq;
11585 net_dev->base_addr = (unsigned long)priv->hw_base;
11586 net_dev->mem_start = pci_resource_start(pdev, 0);
11587 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11589 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11590 if (err) {
11591 IPW_ERROR("failed to create sysfs device attributes\n");
11592 mutex_unlock(&priv->mutex);
11593 goto out_release_irq;
11596 mutex_unlock(&priv->mutex);
11597 err = register_netdev(net_dev);
11598 if (err) {
11599 IPW_ERROR("failed to register network device\n");
11600 goto out_remove_sysfs;
11603 #ifdef CONFIG_IPW2200_PROMISCUOUS
11604 if (rtap_iface) {
11605 err = ipw_prom_alloc(priv);
11606 if (err) {
11607 IPW_ERROR("Failed to register promiscuous network "
11608 "device (error %d).\n", err);
11609 unregister_netdev(priv->net_dev);
11610 goto out_remove_sysfs;
11613 #endif
11615 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11616 "channels, %d 802.11a channels)\n",
11617 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11618 priv->ieee->geo.a_channels);
11620 return 0;
11622 out_remove_sysfs:
11623 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11624 out_release_irq:
11625 free_irq(pdev->irq, priv);
11626 out_destroy_workqueue:
11627 destroy_workqueue(priv->workqueue);
11628 priv->workqueue = NULL;
11629 out_iounmap:
11630 iounmap(priv->hw_base);
11631 out_pci_release_regions:
11632 pci_release_regions(pdev);
11633 out_pci_disable_device:
11634 pci_disable_device(pdev);
11635 pci_set_drvdata(pdev, NULL);
11636 out_free_ieee80211:
11637 free_ieee80211(priv->net_dev);
11638 out:
11639 return err;
11642 static void ipw_pci_remove(struct pci_dev *pdev)
11644 struct ipw_priv *priv = pci_get_drvdata(pdev);
11645 struct list_head *p, *q;
11646 int i;
11648 if (!priv)
11649 return;
11651 mutex_lock(&priv->mutex);
11653 priv->status |= STATUS_EXIT_PENDING;
11654 ipw_down(priv);
11655 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11657 mutex_unlock(&priv->mutex);
11659 unregister_netdev(priv->net_dev);
11661 if (priv->rxq) {
11662 ipw_rx_queue_free(priv, priv->rxq);
11663 priv->rxq = NULL;
11665 ipw_tx_queue_free(priv);
11667 if (priv->cmdlog) {
11668 kfree(priv->cmdlog);
11669 priv->cmdlog = NULL;
11671 /* ipw_down will ensure that there is no more pending work
11672 * in the workqueue, so we can safely cancel and destroy it now. */
11673 cancel_delayed_work(&priv->adhoc_check);
11674 cancel_delayed_work(&priv->gather_stats);
11675 cancel_delayed_work(&priv->request_scan);
11676 cancel_delayed_work(&priv->rf_kill);
11677 cancel_delayed_work(&priv->scan_check);
11678 destroy_workqueue(priv->workqueue);
11679 priv->workqueue = NULL;
11681 /* Free MAC hash list for ADHOC */
11682 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11683 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11684 list_del(p);
11685 kfree(list_entry(p, struct ipw_ibss_seq, list));
11689 kfree(priv->error);
11690 priv->error = NULL;
11692 #ifdef CONFIG_IPW2200_PROMISCUOUS
11693 ipw_prom_free(priv);
11694 #endif
11696 free_irq(pdev->irq, priv);
11697 iounmap(priv->hw_base);
11698 pci_release_regions(pdev);
11699 pci_disable_device(pdev);
11700 pci_set_drvdata(pdev, NULL);
11701 free_ieee80211(priv->net_dev);
11702 free_firmware();
11705 #ifdef CONFIG_PM
11706 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11708 struct ipw_priv *priv = pci_get_drvdata(pdev);
11709 struct net_device *dev = priv->net_dev;
11711 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11713 /* Take down the device; powers it off, etc. */
11714 ipw_down(priv);
11716 /* Remove the PRESENT state of the device */
11717 netif_device_detach(dev);
11719 pci_save_state(pdev);
11720 pci_disable_device(pdev);
11721 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11723 return 0;
11726 static int ipw_pci_resume(struct pci_dev *pdev)
11728 struct ipw_priv *priv = pci_get_drvdata(pdev);
11729 struct net_device *dev = priv->net_dev;
11730 u32 val;
11732 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11734 pci_set_power_state(pdev, PCI_D0);
11735 pci_enable_device(pdev);
11736 pci_restore_state(pdev);
11739 * Suspend/Resume resets the PCI configuration space, so we have to
11740 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11741 * from interfering with C3 CPU state. pci_restore_state won't help
11742 * here since it only restores the first 64 bytes of the PCI config header.
        pci_read_config_dword(pdev, 0x40, &val);
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

        /* Set the device back into the PRESENT state; this will also wake
         * the queue if needed */
        netif_device_attach(dev);

        /* Bring the device back up */
        queue_work(priv->workqueue, &priv->up);

        return 0;
}
#endif
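
/* .shutdown hook: called on reboot/poweroff so the adapter is quiesced and
 * powered off before the machine goes down. */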
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
        struct ipw_priv *priv = pci_get_drvdata(pdev);

        /* Take down the device; powers it off, etc. */
        ipw_down(priv);

        pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
        .name = DRV_NAME,
        .id_table = card_ids,
        .probe = ipw_pci_probe,
        .remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
        .suspend = ipw_pci_suspend,
        .resume = ipw_pci_resume,
#endif
        .shutdown = ipw_pci_shutdown,
};
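
/* Module init: register the PCI driver and expose a driver-wide debug_level
 * attribute in sysfs (typically /sys/bus/pci/drivers/ipw2200/debug_level). */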
static int __init ipw_init(void)
{
        int ret;

        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

        ret = pci_register_driver(&ipw_driver);
        if (ret) {
                IPW_ERROR("Unable to initialize PCI module\n");
                return ret;
        }

        ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
        if (ret) {
                IPW_ERROR("Unable to create driver sysfs file\n");
                pci_unregister_driver(&ipw_driver);
                return ret;
        }

        return ret;
}

static void __exit ipw_exit(void)
{
        driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
        pci_unregister_driver(&ipw_driver);
}
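
/* Module parameters.  All are 0444, i.e. readable but not writable through
 * sysfs, so they are normally set at load time.  Illustrative example only
 * (the values below are arbitrary, not recommendations):
 *
 *   modprobe ipw2200 debug=0x43fff channel=6 hwcrypto=1
 */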
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");

module_exit(ipw_exit);
module_init(ipw_init);