ipw2200: firmware DMA loading rework
drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
33 #include "ipw2200.h"
36 #ifndef KBUILD_EXTMOD
37 #define VK "k"
38 #else
39 #define VK
40 #endif
42 #ifdef CONFIG_IPW2200_DEBUG
43 #define VD "d"
44 #else
45 #define VD
46 #endif
48 #ifdef CONFIG_IPW2200_MONITOR
49 #define VM "m"
50 #else
51 #define VM
52 #endif
54 #ifdef CONFIG_IPW2200_PROMISCUOUS
55 #define VP "p"
56 #else
57 #define VP
58 #endif
60 #ifdef CONFIG_IPW2200_RADIOTAP
61 #define VR "r"
62 #else
63 #define VR
64 #endif
66 #ifdef CONFIG_IPW2200_QOS
67 #define VQ "q"
68 #else
69 #define VQ
70 #endif
72 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
73 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
74 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
75 #define DRV_VERSION IPW2200_VERSION
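/* Editorial example (not from the original source): the V* suffixes above are
 * simply concatenated onto the base version.  A hypothetical in-tree build
 * with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS enabled (and the other
 * options off) would report IPW2200_VERSION as "1.2.2kdq", while an external
 * module build with all options off would report plain "1.2.2". */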
77 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79 MODULE_DESCRIPTION(DRV_DESCRIPTION);
80 MODULE_VERSION(DRV_VERSION);
81 MODULE_AUTHOR(DRV_COPYRIGHT);
82 MODULE_LICENSE("GPL");
84 static int cmdlog = 0;
85 static int debug = 0;
86 static int channel = 0;
87 static int mode = 0;
89 static u32 ipw_debug_level;
90 static int associate;
91 static int auto_create = 1;
92 static int led = 0;
93 static int disable = 0;
94 static int bt_coexist = 0;
95 static int hwcrypto = 0;
96 static int roaming = 1;
97 static const char ipw_modes[] = {
98 'a', 'b', 'g', '?'
100 static int antenna = CFG_SYS_ANTENNA_BOTH;
102 #ifdef CONFIG_IPW2200_PROMISCUOUS
103 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
104 #endif
107 #ifdef CONFIG_IPW2200_QOS
108 static int qos_enable = 0;
109 static int qos_burst_enable = 0;
110 static int qos_no_ack_mask = 0;
111 static int burst_duration_CCK = 0;
112 static int burst_duration_OFDM = 0;
114 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
115 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
116 QOS_TX3_CW_MIN_OFDM},
117 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
118 QOS_TX3_CW_MAX_OFDM},
119 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
120 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
121 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
122 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
125 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
126 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
127 QOS_TX3_CW_MIN_CCK},
128 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
129 QOS_TX3_CW_MAX_CCK},
130 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
131 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
132 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
133 QOS_TX3_TXOP_LIMIT_CCK}
136 static struct ieee80211_qos_parameters def_parameters_OFDM = {
137 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
138 DEF_TX3_CW_MIN_OFDM},
139 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
140 DEF_TX3_CW_MAX_OFDM},
141 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
142 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
143 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
144 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
147 static struct ieee80211_qos_parameters def_parameters_CCK = {
148 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
149 DEF_TX3_CW_MIN_CCK},
150 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
151 DEF_TX3_CW_MAX_CCK},
152 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
153 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
154 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
155 DEF_TX3_TXOP_LIMIT_CCK}
158 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160 static int from_priority_to_tx_queue[] = {
161 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
162 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
165 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
168 *qos_param);
169 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
170 *qos_param);
171 #endif /* CONFIG_IPW2200_QOS */
173 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
174 static void ipw_remove_current_network(struct ipw_priv *priv);
175 static void ipw_rx(struct ipw_priv *priv);
176 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
177 struct clx2_tx_queue *txq, int qindex);
178 static int ipw_queue_reset(struct ipw_priv *priv);
180 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
181 int len, int sync);
183 static void ipw_tx_queue_free(struct ipw_priv *);
185 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
186 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
187 static void ipw_rx_queue_replenish(void *);
188 static int ipw_up(struct ipw_priv *);
189 static void ipw_bg_up(struct work_struct *work);
190 static void ipw_down(struct ipw_priv *);
191 static void ipw_bg_down(struct work_struct *work);
192 static int ipw_config(struct ipw_priv *);
193 static int init_supported_rates(struct ipw_priv *priv,
194 struct ipw_supported_rates *prates);
195 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
196 static void ipw_send_wep_keys(struct ipw_priv *, int);
198 static int snprint_line(char *buf, size_t count,
199 const u8 * data, u32 len, u32 ofs)
201 int out, i, j, l;
202 char c;
204 out = snprintf(buf, count, "%08X", ofs);
206 for (l = 0, i = 0; i < 2; i++) {
207 out += snprintf(buf + out, count - out, " ");
208 for (j = 0; j < 8 && l < len; j++, l++)
209 out += snprintf(buf + out, count - out, "%02X ",
210 data[(i * 8 + j)]);
211 for (; j < 8; j++)
212 out += snprintf(buf + out, count - out, " ");
215 out += snprintf(buf + out, count - out, " ");
216 for (l = 0, i = 0; i < 2; i++) {
217 out += snprintf(buf + out, count - out, " ");
218 for (j = 0; j < 8 && l < len; j++, l++) {
219 c = data[(i * 8 + j)];
220 if (!isascii(c) || !isprint(c))
221 c = '.';
223 out += snprintf(buf + out, count - out, "%c", c);
226 for (; j < 8; j++)
227 out += snprintf(buf + out, count - out, " ");
230 return out;
233 static void printk_buf(int level, const u8 * data, u32 len)
235 char line[81];
236 u32 ofs = 0;
237 if (!(ipw_debug_level & level))
238 return;
240 while (len) {
241 snprint_line(line, sizeof(line), &data[ofs],
242 min(len, 16U), ofs);
243 printk(KERN_DEBUG "%s\n", line);
244 ofs += 16;
245 len -= min(len, 16U);
249 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251 size_t out = size;
252 u32 ofs = 0;
253 int total = 0;
255 while (size && len) {
256 out = snprint_line(output, size, &data[ofs],
257 min_t(size_t, len, 16U), ofs);
259 ofs += 16;
260 output += out;
261 size -= out;
262 len -= min_t(size_t, len, 16U);
263 total += out;
265 return total;
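/* Illustrative note (added for clarity): each call to snprint_line() formats
 * up to 16 bytes into a single dump line, roughly of the form
 *
 *   00000000  48 65 6C 6C 6F 20 77 6F  72 6C 64 21 00 01 02 03   Hello wo rld!....
 *
 * i.e. an 8-digit hex offset, two groups of up to eight hex bytes, and the
 * same bytes rendered as ASCII with non-printable characters shown as '.'.
 * printk_buf() emits one such line per 16 bytes at KERN_DEBUG level, and
 * snprintk_buf() accumulates the same lines into a caller supplied buffer. */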
268 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
269 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
270 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
273 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
274 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
277 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
278 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
281 __LINE__, (u32) (b), (u32) (c));
282 _ipw_write_reg8(a, b, c);
285 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
286 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
287 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
290 __LINE__, (u32) (b), (u32) (c));
291 _ipw_write_reg16(a, b, c);
294 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
295 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
296 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
299 __LINE__, (u32) (b), (u32) (c));
300 _ipw_write_reg32(a, b, c);
303 /* 8-bit direct write (low 4K) */
304 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
305 u8 val)
307 writeb(val, ipw->hw_base + ofs);
310 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
311 #define ipw_write8(ipw, ofs, val) do { \
312 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
313 __LINE__, (u32)(ofs), (u32)(val)); \
314 _ipw_write8(ipw, ofs, val); \
315 } while (0)
317 /* 16-bit direct write (low 4K) */
318 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
319 u16 val)
321 writew(val, ipw->hw_base + ofs);
324 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
325 #define ipw_write16(ipw, ofs, val) do { \
326 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
327 __LINE__, (u32)(ofs), (u32)(val)); \
328 _ipw_write16(ipw, ofs, val); \
329 } while (0)
331 /* 32-bit direct write (low 4K) */
332 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
333 u32 val)
335 writel(val, ipw->hw_base + ofs);
338 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_write32(ipw, ofs, val) do { \
340 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
341 __LINE__, (u32)(ofs), (u32)(val)); \
342 _ipw_write32(ipw, ofs, val); \
343 } while (0)
345 /* 8-bit direct read (low 4K) */
346 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
348 return readb(ipw->hw_base + ofs);
351 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read8(ipw, ofs) ({ \
353 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
354 (u32)(ofs)); \
355 _ipw_read8(ipw, ofs); \
358 /* 16-bit direct read (low 4K) */
359 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
361 return readw(ipw->hw_base + ofs);
364 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read16(ipw, ofs) ({ \
366 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
367 (u32)(ofs)); \
368 _ipw_read16(ipw, ofs); \
371 /* 32-bit direct read (low 4K) */
372 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
374 return readl(ipw->hw_base + ofs);
377 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
378 #define ipw_read32(ipw, ofs) ({ \
379 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
380 (u32)(ofs)); \
381 _ipw_read32(ipw, ofs); \
384 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
385 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
386 #define ipw_read_indirect(a, b, c, d) ({ \
387 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
388 __LINE__, (u32)(b), (u32)(d)); \
389 _ipw_read_indirect(a, b, c, d); \
392 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
393 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
394 int num);
395 #define ipw_write_indirect(a, b, c, d) do { \
396 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
397 __LINE__, (u32)(b), (u32)(d)); \
398 _ipw_write_indirect(a, b, c, d); \
399 } while (0)
401 /* 32-bit indirect write (above 4K) */
402 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
404 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
405 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
406 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
409 /* 8-bit indirect write (above 4K) */
410 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
412 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
413 u32 dif_len = reg - aligned_addr;
415 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
416 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
417 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
420 /* 16-bit indirect write (above 4K) */
421 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
423 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
424 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
426 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
427 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
428 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
431 /* 8-bit indirect read (above 4K) */
432 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
434 u32 word;
435 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
436 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
437 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
438 return (word >> ((reg & 0x3) * 8)) & 0xff;
441 /* 32-bit indirect read (above 4K) */
442 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
444 u32 value;
446 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
448 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
449 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
450 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
451 return value;
454 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
455 /* for area above 1st 4K of SRAM/reg space */
456 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
457 int num)
459 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = addr - aligned_addr;
461 u32 i;
463 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
465 if (num <= 0) {
466 return;
469 /* Read the first dword (or portion) byte by byte */
470 if (unlikely(dif_len)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 /* Start reading at aligned_addr + dif_len */
473 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
474 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
475 aligned_addr += 4;
478 /* Read all of the middle dwords as dwords, with auto-increment */
479 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
480 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
481 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
483 /* Read the last dword (or portion) byte by byte */
484 if (unlikely(num)) {
485 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
486 for (i = 0; num > 0; i++, num--)
487 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
491 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
492 /* for area above 1st 4K of SRAM/reg space */
493 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
494 int num)
496 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
497 u32 dif_len = addr - aligned_addr;
498 u32 i;
500 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
502 if (num <= 0) {
503 return;
506 /* Write the first dword (or portion) byte by byte */
507 if (unlikely(dif_len)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 /* Start writing at aligned_addr + dif_len */
510 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
511 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
512 aligned_addr += 4;
515 /* Write all of the middle dwords as dwords, with auto-increment */
516 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
517 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
518 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
520 /* Write the last dword (or portion) byte by byte */
521 if (unlikely(num)) {
522 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
523 for (i = 0; num > 0; i++, num--, buf++)
524 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
528 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
529 /* for 1st 4K of SRAM/regs space */
530 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
531 int num)
533 memcpy_toio((priv->hw_base + addr), buf, num);
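/* Summary note (editorial, not in the original source): the device is
 * accessed through two paths.  The first 4K of SRAM/register space is mapped
 * at priv->hw_base and touched directly with readb/readw/readl and
 * writeb/writew/writel through the _ipw_read{8,16,32} and _ipw_write{8,16,32}
 * helpers above.  Addresses above 4K are reached indirectly: the target
 * address goes into IPW_INDIRECT_ADDR and data moves through
 * IPW_INDIRECT_DATA, with IPW_AUTOINC_ADDR / IPW_AUTOINC_DATA providing
 * auto-incrementing dword transfers for the bulk read/write helpers. */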
536 /* Set bit(s) in low 4K of SRAM/regs */
537 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
539 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
542 /* Clear bit(s) in low 4K of SRAM/regs */
543 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
545 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
548 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
550 if (priv->status & STATUS_INT_ENABLED)
551 return;
552 priv->status |= STATUS_INT_ENABLED;
553 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
556 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
558 if (!(priv->status & STATUS_INT_ENABLED))
559 return;
560 priv->status &= ~STATUS_INT_ENABLED;
561 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
564 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
566 unsigned long flags;
568 spin_lock_irqsave(&priv->irq_lock, flags);
569 __ipw_enable_interrupts(priv);
570 spin_unlock_irqrestore(&priv->irq_lock, flags);
573 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
575 unsigned long flags;
577 spin_lock_irqsave(&priv->irq_lock, flags);
578 __ipw_disable_interrupts(priv);
579 spin_unlock_irqrestore(&priv->irq_lock, flags);
582 static char *ipw_error_desc(u32 val)
584 switch (val) {
585 case IPW_FW_ERROR_OK:
586 return "ERROR_OK";
587 case IPW_FW_ERROR_FAIL:
588 return "ERROR_FAIL";
589 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
590 return "MEMORY_UNDERFLOW";
591 case IPW_FW_ERROR_MEMORY_OVERFLOW:
592 return "MEMORY_OVERFLOW";
593 case IPW_FW_ERROR_BAD_PARAM:
594 return "BAD_PARAM";
595 case IPW_FW_ERROR_BAD_CHECKSUM:
596 return "BAD_CHECKSUM";
597 case IPW_FW_ERROR_NMI_INTERRUPT:
598 return "NMI_INTERRUPT";
599 case IPW_FW_ERROR_BAD_DATABASE:
600 return "BAD_DATABASE";
601 case IPW_FW_ERROR_ALLOC_FAIL:
602 return "ALLOC_FAIL";
603 case IPW_FW_ERROR_DMA_UNDERRUN:
604 return "DMA_UNDERRUN";
605 case IPW_FW_ERROR_DMA_STATUS:
606 return "DMA_STATUS";
607 case IPW_FW_ERROR_DINO_ERROR:
608 return "DINO_ERROR";
609 case IPW_FW_ERROR_EEPROM_ERROR:
610 return "EEPROM_ERROR";
611 case IPW_FW_ERROR_SYSASSERT:
612 return "SYSASSERT";
613 case IPW_FW_ERROR_FATAL_ERROR:
614 return "FATAL_ERROR";
615 default:
616 return "UNKNOWN_ERROR";
620 static void ipw_dump_error_log(struct ipw_priv *priv,
621 struct ipw_fw_error *error)
623 u32 i;
625 if (!error) {
626 IPW_ERROR("Error allocating and capturing error log. "
627 "Nothing to dump.\n");
628 return;
631 IPW_ERROR("Start IPW Error Log Dump:\n");
632 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
633 error->status, error->config);
635 for (i = 0; i < error->elem_len; i++)
636 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
637 ipw_error_desc(error->elem[i].desc),
638 error->elem[i].time,
639 error->elem[i].blink1,
640 error->elem[i].blink2,
641 error->elem[i].link1,
642 error->elem[i].link2, error->elem[i].data);
643 for (i = 0; i < error->log_len; i++)
644 IPW_ERROR("%i\t0x%08x\t%i\n",
645 error->log[i].time,
646 error->log[i].data, error->log[i].event);
649 static inline int ipw_is_init(struct ipw_priv *priv)
651 return (priv->status & STATUS_INIT) ? 1 : 0;
654 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
656 u32 addr, field_info, field_len, field_count, total_len;
658 IPW_DEBUG_ORD("ordinal = %i\n", ord);
660 if (!priv || !val || !len) {
661 IPW_DEBUG_ORD("Invalid argument\n");
662 return -EINVAL;
665 /* verify device ordinal tables have been initialized */
666 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
667 IPW_DEBUG_ORD("Access ordinals before initialization\n");
668 return -EINVAL;
671 switch (IPW_ORD_TABLE_ID_MASK & ord) {
672 case IPW_ORD_TABLE_0_MASK:
674 * TABLE 0: Direct access to a table of 32 bit values
676 * This is a very simple table with the data directly
677 * read from the table
680 /* remove the table id from the ordinal */
681 ord &= IPW_ORD_TABLE_VALUE_MASK;
683 /* boundary check */
684 if (ord > priv->table0_len) {
685 IPW_DEBUG_ORD("ordinal value (%i) longer than "
686 "max (%i)\n", ord, priv->table0_len);
687 return -EINVAL;
690 /* verify we have enough room to store the value */
691 if (*len < sizeof(u32)) {
692 IPW_DEBUG_ORD("ordinal buffer length too small, "
693 "need %zd\n", sizeof(u32));
694 return -EINVAL;
697 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
698 ord, priv->table0_addr + (ord << 2));
700 *len = sizeof(u32);
701 ord <<= 2;
702 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
703 break;
705 case IPW_ORD_TABLE_1_MASK:
707 * TABLE 1: Indirect access to a table of 32 bit values
709 * This is a fairly large table of u32 values each
710 * representing starting addr for the data (which is
711 * also a u32)
714 /* remove the table id from the ordinal */
715 ord &= IPW_ORD_TABLE_VALUE_MASK;
717 /* boundary check */
718 if (ord > priv->table1_len) {
719 IPW_DEBUG_ORD("ordinal value too long\n");
720 return -EINVAL;
723 /* verify we have enough room to store the value */
724 if (*len < sizeof(u32)) {
725 IPW_DEBUG_ORD("ordinal buffer length too small, "
726 "need %zd\n", sizeof(u32));
727 return -EINVAL;
730 *((u32 *) val) =
731 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
732 *len = sizeof(u32);
733 break;
735 case IPW_ORD_TABLE_2_MASK:
737 * TABLE 2: Indirect access to a table of variable sized values
739 * This table consists of six values, each containing
740 * - dword containing the starting offset of the data
741 * - dword containing the length in the first 16 bits
742 * and the count in the second 16 bits
745 /* remove the table id from the ordinal */
746 ord &= IPW_ORD_TABLE_VALUE_MASK;
748 /* boundary check */
749 if (ord > priv->table2_len) {
750 IPW_DEBUG_ORD("ordinal value too long\n");
751 return -EINVAL;
754 /* get the address of statistic */
755 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
757 /* get the second DW of statistics;
758 * two 16-bit words - first is length, second is count */
759 field_info =
760 ipw_read_reg32(priv,
761 priv->table2_addr + (ord << 3) +
762 sizeof(u32));
764 /* get each entry length */
765 field_len = *((u16 *) & field_info);
767 /* get number of entries */
768 field_count = *(((u16 *) & field_info) + 1);
770 /* abort if there is not enough memory */
771 total_len = field_len * field_count;
772 if (total_len > *len) {
773 *len = total_len;
774 return -EINVAL;
777 *len = total_len;
778 if (!total_len)
779 return 0;
781 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
782 "field_info = 0x%08x\n",
783 addr, total_len, field_info);
784 ipw_read_indirect(priv, addr, val, total_len);
785 break;
787 default:
788 IPW_DEBUG_ORD("Invalid ordinal!\n");
789 return -EINVAL;
793 return 0;
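/* Illustrative (hypothetical) caller, mirroring how the sysfs handlers later
 * in this file use the ordinal interface: the caller passes in its buffer
 * size and, on success, *len holds the number of bytes actually written. */
#if 0
static u32 example_read_ucode_version(struct ipw_priv *priv)
{
	u32 val = 0;
	u32 len = sizeof(val);

	/* returns a negative errno on a bad ordinal or a too-small buffer */
	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &val, &len))
		return 0;
	return val;
}
#endif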
796 static void ipw_init_ordinals(struct ipw_priv *priv)
798 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
799 priv->table0_len = ipw_read32(priv, priv->table0_addr);
801 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
802 priv->table0_addr, priv->table0_len);
804 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
805 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
807 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
808 priv->table1_addr, priv->table1_len);
810 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
811 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
812 priv->table2_len &= 0x0000ffff; /* use first two bytes */
814 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
815 priv->table2_addr, priv->table2_len);
819 static u32 ipw_register_toggle(u32 reg)
821 reg &= ~IPW_START_STANDBY;
822 if (reg & IPW_GATE_ODMA)
823 reg &= ~IPW_GATE_ODMA;
824 if (reg & IPW_GATE_IDMA)
825 reg &= ~IPW_GATE_IDMA;
826 if (reg & IPW_GATE_ADMA)
827 reg &= ~IPW_GATE_ADMA;
828 return reg;
832 * LED behavior:
833 * - On radio ON, turn on any LEDs that need to be on during start
834 * - On initialization, start unassociated blink
835 * - On association, disable unassociated blink
836 * - On disassociation, start unassociated blink
837 * - On radio OFF, turn off any LEDs started during radio on
840 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
841 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
842 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
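/* Editorial note: with the values above, the unassociated "link" blink is a
 * 3 second cycle -- roughly 300 ms on (LD_TIME_LINK_ON) followed by 2.7
 * seconds off (LD_TIME_LINK_OFF) -- while the activity LED is held on for at
 * least 250 ms (LD_TIME_ACT_ON) after each burst of traffic. */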
844 static void ipw_led_link_on(struct ipw_priv *priv)
846 unsigned long flags;
847 u32 led;
849 /* If configured to not use LEDs, or nic_type is 1,
850 * then we don't toggle a LINK led */
851 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
852 return;
854 spin_lock_irqsave(&priv->lock, flags);
856 if (!(priv->status & STATUS_RF_KILL_MASK) &&
857 !(priv->status & STATUS_LED_LINK_ON)) {
858 IPW_DEBUG_LED("Link LED On\n");
859 led = ipw_read_reg32(priv, IPW_EVENT_REG);
860 led |= priv->led_association_on;
862 led = ipw_register_toggle(led);
864 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
865 ipw_write_reg32(priv, IPW_EVENT_REG, led);
867 priv->status |= STATUS_LED_LINK_ON;
869 /* If we aren't associated, schedule turning the LED off */
870 if (!(priv->status & STATUS_ASSOCIATED))
871 queue_delayed_work(priv->workqueue,
872 &priv->led_link_off,
873 LD_TIME_LINK_ON);
876 spin_unlock_irqrestore(&priv->lock, flags);
879 static void ipw_bg_led_link_on(struct work_struct *work)
881 struct ipw_priv *priv =
882 container_of(work, struct ipw_priv, led_link_on.work);
883 mutex_lock(&priv->mutex);
884 ipw_led_link_on(priv);
885 mutex_unlock(&priv->mutex);
888 static void ipw_led_link_off(struct ipw_priv *priv)
890 unsigned long flags;
891 u32 led;
893 /* If configured not to use LEDs, or nic type is 1,
894 * then we don't toggle the LINK led. */
895 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
896 return;
898 spin_lock_irqsave(&priv->lock, flags);
900 if (priv->status & STATUS_LED_LINK_ON) {
901 led = ipw_read_reg32(priv, IPW_EVENT_REG);
902 led &= priv->led_association_off;
903 led = ipw_register_toggle(led);
905 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
906 ipw_write_reg32(priv, IPW_EVENT_REG, led);
908 IPW_DEBUG_LED("Link LED Off\n");
910 priv->status &= ~STATUS_LED_LINK_ON;
912 /* If we aren't associated and the radio is on, schedule
913 * turning the LED on (blink while unassociated) */
914 if (!(priv->status & STATUS_RF_KILL_MASK) &&
915 !(priv->status & STATUS_ASSOCIATED))
916 queue_delayed_work(priv->workqueue, &priv->led_link_on,
917 LD_TIME_LINK_OFF);
921 spin_unlock_irqrestore(&priv->lock, flags);
924 static void ipw_bg_led_link_off(struct work_struct *work)
926 struct ipw_priv *priv =
927 container_of(work, struct ipw_priv, led_link_off.work);
928 mutex_lock(&priv->mutex);
929 ipw_led_link_off(priv);
930 mutex_unlock(&priv->mutex);
933 static void __ipw_led_activity_on(struct ipw_priv *priv)
935 u32 led;
937 if (priv->config & CFG_NO_LED)
938 return;
940 if (priv->status & STATUS_RF_KILL_MASK)
941 return;
943 if (!(priv->status & STATUS_LED_ACT_ON)) {
944 led = ipw_read_reg32(priv, IPW_EVENT_REG);
945 led |= priv->led_activity_on;
947 led = ipw_register_toggle(led);
949 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
950 ipw_write_reg32(priv, IPW_EVENT_REG, led);
952 IPW_DEBUG_LED("Activity LED On\n");
954 priv->status |= STATUS_LED_ACT_ON;
956 cancel_delayed_work(&priv->led_act_off);
957 queue_delayed_work(priv->workqueue, &priv->led_act_off,
958 LD_TIME_ACT_ON);
959 } else {
960 /* Reschedule LED off for full time period */
961 cancel_delayed_work(&priv->led_act_off);
962 queue_delayed_work(priv->workqueue, &priv->led_act_off,
963 LD_TIME_ACT_ON);
967 #if 0
968 void ipw_led_activity_on(struct ipw_priv *priv)
970 unsigned long flags;
971 spin_lock_irqsave(&priv->lock, flags);
972 __ipw_led_activity_on(priv);
973 spin_unlock_irqrestore(&priv->lock, flags);
975 #endif /* 0 */
977 static void ipw_led_activity_off(struct ipw_priv *priv)
979 unsigned long flags;
980 u32 led;
982 if (priv->config & CFG_NO_LED)
983 return;
985 spin_lock_irqsave(&priv->lock, flags);
987 if (priv->status & STATUS_LED_ACT_ON) {
988 led = ipw_read_reg32(priv, IPW_EVENT_REG);
989 led &= priv->led_activity_off;
991 led = ipw_register_toggle(led);
993 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
994 ipw_write_reg32(priv, IPW_EVENT_REG, led);
996 IPW_DEBUG_LED("Activity LED Off\n");
998 priv->status &= ~STATUS_LED_ACT_ON;
1001 spin_unlock_irqrestore(&priv->lock, flags);
1004 static void ipw_bg_led_activity_off(struct work_struct *work)
1006 struct ipw_priv *priv =
1007 container_of(work, struct ipw_priv, led_act_off.work);
1008 mutex_lock(&priv->mutex);
1009 ipw_led_activity_off(priv);
1010 mutex_unlock(&priv->mutex);
1013 static void ipw_led_band_on(struct ipw_priv *priv)
1015 unsigned long flags;
1016 u32 led;
1018 /* Only nic type 1 supports mode LEDs */
1019 if (priv->config & CFG_NO_LED ||
1020 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1021 return;
1023 spin_lock_irqsave(&priv->lock, flags);
1025 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1026 if (priv->assoc_network->mode == IEEE_A) {
1027 led |= priv->led_ofdm_on;
1028 led &= priv->led_association_off;
1029 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1030 } else if (priv->assoc_network->mode == IEEE_G) {
1031 led |= priv->led_ofdm_on;
1032 led |= priv->led_association_on;
1033 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1034 } else {
1035 led &= priv->led_ofdm_off;
1036 led |= priv->led_association_on;
1037 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1040 led = ipw_register_toggle(led);
1042 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1043 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1045 spin_unlock_irqrestore(&priv->lock, flags);
1048 static void ipw_led_band_off(struct ipw_priv *priv)
1050 unsigned long flags;
1051 u32 led;
1053 /* Only nic type 1 supports mode LEDs */
1054 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1055 return;
1057 spin_lock_irqsave(&priv->lock, flags);
1059 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1060 led &= priv->led_ofdm_off;
1061 led &= priv->led_association_off;
1063 led = ipw_register_toggle(led);
1065 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1066 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1068 spin_unlock_irqrestore(&priv->lock, flags);
1071 static void ipw_led_radio_on(struct ipw_priv *priv)
1073 ipw_led_link_on(priv);
1076 static void ipw_led_radio_off(struct ipw_priv *priv)
1078 ipw_led_activity_off(priv);
1079 ipw_led_link_off(priv);
1082 static void ipw_led_link_up(struct ipw_priv *priv)
1084 /* Set the Link Led on for all nic types */
1085 ipw_led_link_on(priv);
1088 static void ipw_led_link_down(struct ipw_priv *priv)
1090 ipw_led_activity_off(priv);
1091 ipw_led_link_off(priv);
1093 if (priv->status & STATUS_RF_KILL_MASK)
1094 ipw_led_radio_off(priv);
1097 static void ipw_led_init(struct ipw_priv *priv)
1099 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1101 /* Set the default PINs for the link and activity leds */
1102 priv->led_activity_on = IPW_ACTIVITY_LED;
1103 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1105 priv->led_association_on = IPW_ASSOCIATED_LED;
1106 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1108 /* Set the default PINs for the OFDM leds */
1109 priv->led_ofdm_on = IPW_OFDM_LED;
1110 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1112 switch (priv->nic_type) {
1113 case EEPROM_NIC_TYPE_1:
1114 /* In this NIC type, the LEDs are reversed.... */
1115 priv->led_activity_on = IPW_ASSOCIATED_LED;
1116 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1117 priv->led_association_on = IPW_ACTIVITY_LED;
1118 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1120 if (!(priv->config & CFG_NO_LED))
1121 ipw_led_band_on(priv);
1123 /* And we don't blink link LEDs for this nic, so
1124 * just return here */
1125 return;
1127 case EEPROM_NIC_TYPE_3:
1128 case EEPROM_NIC_TYPE_2:
1129 case EEPROM_NIC_TYPE_4:
1130 case EEPROM_NIC_TYPE_0:
1131 break;
1133 default:
1134 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1135 priv->nic_type);
1136 priv->nic_type = EEPROM_NIC_TYPE_0;
1137 break;
1140 if (!(priv->config & CFG_NO_LED)) {
1141 if (priv->status & STATUS_ASSOCIATED)
1142 ipw_led_link_on(priv);
1143 else
1144 ipw_led_link_off(priv);
1148 static void ipw_led_shutdown(struct ipw_priv *priv)
1150 ipw_led_activity_off(priv);
1151 ipw_led_link_off(priv);
1152 ipw_led_band_off(priv);
1153 cancel_delayed_work(&priv->led_link_on);
1154 cancel_delayed_work(&priv->led_link_off);
1155 cancel_delayed_work(&priv->led_act_off);
1159 * The following adds a new attribute to the sysfs representation
1160 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1161 * used for controlling the debug level.
1163 * See the level definitions in ipw for details.
1165 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1167 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1170 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1171 size_t count)
1173 char *p = (char *)buf;
1174 u32 val;
1176 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1177 p++;
1178 if (p[0] == 'x' || p[0] == 'X')
1179 p++;
1180 val = simple_strtoul(p, &p, 16);
1181 } else
1182 val = simple_strtoul(p, &p, 10);
1183 if (p == buf)
1184 printk(KERN_INFO DRV_NAME
1185 ": %s is not in hex or decimal form.\n", buf);
1186 else
1187 ipw_debug_level = val;
1189 return strnlen(buf, count);
1192 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1193 show_debug_level, store_debug_level);
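/* Usage sketch (assuming the default sysfs path mentioned above): the debug
 * level can be inspected or changed at runtime, in decimal or hex, e.g.
 *
 *   cat /sys/bus/pci/drivers/ipw/debug_level
 *   echo 0x43fff > /sys/bus/pci/drivers/ipw/debug_level
 *
 * Non-numeric input is reported via printk and leaves the level unchanged. */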
1195 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1197 /* length = 1st dword in log */
1198 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1201 static void ipw_capture_event_log(struct ipw_priv *priv,
1202 u32 log_len, struct ipw_event *log)
1204 u32 base;
1206 if (log_len) {
1207 base = ipw_read32(priv, IPW_EVENT_LOG);
1208 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1209 (u8 *) log, sizeof(*log) * log_len);
1213 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1215 struct ipw_fw_error *error;
1216 u32 log_len = ipw_get_event_log_len(priv);
1217 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1218 u32 elem_len = ipw_read_reg32(priv, base);
1220 error = kmalloc(sizeof(*error) +
1221 sizeof(*error->elem) * elem_len +
1222 sizeof(*error->log) * log_len, GFP_ATOMIC);
1223 if (!error) {
1224 IPW_ERROR("Memory allocation for firmware error log "
1225 "failed.\n");
1226 return NULL;
1228 error->jiffies = jiffies;
1229 error->status = priv->status;
1230 error->config = priv->config;
1231 error->elem_len = elem_len;
1232 error->log_len = log_len;
1233 error->elem = (struct ipw_error_elem *)error->payload;
1234 error->log = (struct ipw_event *)(error->elem + elem_len);
1236 ipw_capture_event_log(priv, log_len, error->log);
1238 if (elem_len)
1239 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1240 sizeof(*error->elem) * elem_len);
1242 return error;
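/* Layout note (editorial): the error record is one allocation with the
 * element and event arrays packed after the header,
 *
 *   [struct ipw_fw_error][elem[0..elem_len)][log[0..log_len)]
 *
 * error->elem and error->log are just pointers into that payload, so a single
 * kfree(priv->error) in clear_error() below releases the whole record. */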
1245 static ssize_t show_event_log(struct device *d,
1246 struct device_attribute *attr, char *buf)
1248 struct ipw_priv *priv = dev_get_drvdata(d);
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 log_size;
1251 struct ipw_event *log;
1252 u32 len = 0, i;
1254 /* not using min() because of its strict type checking */
1255 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1256 sizeof(*log) * log_len : PAGE_SIZE;
1257 log = kzalloc(log_size, GFP_KERNEL);
1258 if (!log) {
1259 IPW_ERROR("Unable to allocate memory for log\n");
1260 return 0;
1262 log_len = log_size / sizeof(*log);
1263 ipw_capture_event_log(priv, log_len, log);
1265 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1266 for (i = 0; i < log_len; i++)
1267 len += snprintf(buf + len, PAGE_SIZE - len,
1268 "\n%08X%08X%08X",
1269 log[i].time, log[i].event, log[i].data);
1270 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1271 kfree(log);
1272 return len;
1275 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1277 static ssize_t show_error(struct device *d,
1278 struct device_attribute *attr, char *buf)
1280 struct ipw_priv *priv = dev_get_drvdata(d);
1281 u32 len = 0, i;
1282 if (!priv->error)
1283 return 0;
1284 len += snprintf(buf + len, PAGE_SIZE - len,
1285 "%08lX%08X%08X%08X",
1286 priv->error->jiffies,
1287 priv->error->status,
1288 priv->error->config, priv->error->elem_len);
1289 for (i = 0; i < priv->error->elem_len; i++)
1290 len += snprintf(buf + len, PAGE_SIZE - len,
1291 "\n%08X%08X%08X%08X%08X%08X%08X",
1292 priv->error->elem[i].time,
1293 priv->error->elem[i].desc,
1294 priv->error->elem[i].blink1,
1295 priv->error->elem[i].blink2,
1296 priv->error->elem[i].link1,
1297 priv->error->elem[i].link2,
1298 priv->error->elem[i].data);
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X", priv->error->log_len);
1302 for (i = 0; i < priv->error->log_len; i++)
1303 len += snprintf(buf + len, PAGE_SIZE - len,
1304 "\n%08X%08X%08X",
1305 priv->error->log[i].time,
1306 priv->error->log[i].event,
1307 priv->error->log[i].data);
1308 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1309 return len;
1312 static ssize_t clear_error(struct device *d,
1313 struct device_attribute *attr,
1314 const char *buf, size_t count)
1316 struct ipw_priv *priv = dev_get_drvdata(d);
1318 kfree(priv->error);
1319 priv->error = NULL;
1320 return count;
1323 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1325 static ssize_t show_cmd_log(struct device *d,
1326 struct device_attribute *attr, char *buf)
1328 struct ipw_priv *priv = dev_get_drvdata(d);
1329 u32 len = 0, i;
1330 if (!priv->cmdlog)
1331 return 0;
1332 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1333 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1334 i = (i + 1) % priv->cmdlog_len) {
1335 len +=
1336 snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1338 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1339 priv->cmdlog[i].cmd.len);
1340 len +=
1341 snprintk_buf(buf + len, PAGE_SIZE - len,
1342 (u8 *) priv->cmdlog[i].cmd.param,
1343 priv->cmdlog[i].cmd.len);
1344 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1346 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1347 return len;
1350 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1352 #ifdef CONFIG_IPW2200_PROMISCUOUS
1353 static void ipw_prom_free(struct ipw_priv *priv);
1354 static int ipw_prom_alloc(struct ipw_priv *priv);
1355 static ssize_t store_rtap_iface(struct device *d,
1356 struct device_attribute *attr,
1357 const char *buf, size_t count)
1359 struct ipw_priv *priv = dev_get_drvdata(d);
1360 int rc = 0;
1362 if (count < 1)
1363 return -EINVAL;
1365 switch (buf[0]) {
1366 case '0':
1367 if (!rtap_iface)
1368 return count;
1370 if (netif_running(priv->prom_net_dev)) {
1371 IPW_WARNING("Interface is up. Cannot unregister.\n");
1372 return count;
1375 ipw_prom_free(priv);
1376 rtap_iface = 0;
1377 break;
1379 case '1':
1380 if (rtap_iface)
1381 return count;
1383 rc = ipw_prom_alloc(priv);
1384 if (!rc)
1385 rtap_iface = 1;
1386 break;
1388 default:
1389 return -EINVAL;
1392 if (rc) {
1393 IPW_ERROR("Failed to register promiscuous network "
1394 "device (error %d).\n", rc);
1397 return count;
1400 static ssize_t show_rtap_iface(struct device *d,
1401 struct device_attribute *attr,
1402 char *buf)
1404 struct ipw_priv *priv = dev_get_drvdata(d);
1405 if (rtap_iface)
1406 return sprintf(buf, "%s", priv->prom_net_dev->name);
1407 else {
1408 buf[0] = '-';
1409 buf[1] = '1';
1410 buf[2] = '\0';
1411 return 3;
1415 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1416 store_rtap_iface);
1418 static ssize_t store_rtap_filter(struct device *d,
1419 struct device_attribute *attr,
1420 const char *buf, size_t count)
1422 struct ipw_priv *priv = dev_get_drvdata(d);
1424 if (!priv->prom_priv) {
1425 IPW_ERROR("Attempting to set filter without "
1426 "rtap_iface enabled.\n");
1427 return -EPERM;
1430 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1432 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1433 BIT_ARG16(priv->prom_priv->filter));
1435 return count;
1438 static ssize_t show_rtap_filter(struct device *d,
1439 struct device_attribute *attr,
1440 char *buf)
1442 struct ipw_priv *priv = dev_get_drvdata(d);
1443 return sprintf(buf, "0x%04X",
1444 priv->prom_priv ? priv->prom_priv->filter : 0);
1447 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1448 store_rtap_filter);
1449 #endif
1451 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1452 char *buf)
1454 struct ipw_priv *priv = dev_get_drvdata(d);
1455 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1458 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1459 const char *buf, size_t count)
1461 struct ipw_priv *priv = dev_get_drvdata(d);
1462 struct net_device *dev = priv->net_dev;
1463 char buffer[] = "00000000";
1464 unsigned long len =
1465 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1466 unsigned long val;
1467 char *p = buffer;
1469 IPW_DEBUG_INFO("enter\n");
1471 strncpy(buffer, buf, len);
1472 buffer[len] = 0;
1474 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1475 p++;
1476 if (p[0] == 'x' || p[0] == 'X')
1477 p++;
1478 val = simple_strtoul(p, &p, 16);
1479 } else
1480 val = simple_strtoul(p, &p, 10);
1481 if (p == buffer) {
1482 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1483 } else {
1484 priv->ieee->scan_age = val;
1485 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1488 IPW_DEBUG_INFO("exit\n");
1489 return len;
1492 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1494 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1495 char *buf)
1497 struct ipw_priv *priv = dev_get_drvdata(d);
1498 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1501 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1502 const char *buf, size_t count)
1504 struct ipw_priv *priv = dev_get_drvdata(d);
1506 IPW_DEBUG_INFO("enter\n");
1508 if (count == 0)
1509 return 0;
1511 if (*buf == 0) {
1512 IPW_DEBUG_LED("Disabling LED control.\n");
1513 priv->config |= CFG_NO_LED;
1514 ipw_led_shutdown(priv);
1515 } else {
1516 IPW_DEBUG_LED("Enabling LED control.\n");
1517 priv->config &= ~CFG_NO_LED;
1518 ipw_led_init(priv);
1521 IPW_DEBUG_INFO("exit\n");
1522 return count;
1525 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1527 static ssize_t show_status(struct device *d,
1528 struct device_attribute *attr, char *buf)
1530 struct ipw_priv *p = dev_get_drvdata(d);
1531 return sprintf(buf, "0x%08x\n", (int)p->status);
1534 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1536 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1537 char *buf)
1539 struct ipw_priv *p = dev_get_drvdata(d);
1540 return sprintf(buf, "0x%08x\n", (int)p->config);
1543 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1545 static ssize_t show_nic_type(struct device *d,
1546 struct device_attribute *attr, char *buf)
1548 struct ipw_priv *priv = dev_get_drvdata(d);
1549 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1552 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1554 static ssize_t show_ucode_version(struct device *d,
1555 struct device_attribute *attr, char *buf)
1557 u32 len = sizeof(u32), tmp = 0;
1558 struct ipw_priv *p = dev_get_drvdata(d);
1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1561 return 0;
1563 return sprintf(buf, "0x%08x\n", tmp);
1566 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1568 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1569 char *buf)
1571 u32 len = sizeof(u32), tmp = 0;
1572 struct ipw_priv *p = dev_get_drvdata(d);
1574 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1575 return 0;
1577 return sprintf(buf, "0x%08x\n", tmp);
1580 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1583 * Add a device attribute to view/control the delay between eeprom
1584 * operations.
1586 static ssize_t show_eeprom_delay(struct device *d,
1587 struct device_attribute *attr, char *buf)
1589 struct ipw_priv *p = dev_get_drvdata(d);
1590 int n = p->eeprom_delay;
1591 return sprintf(buf, "%i\n", n);
1593 static ssize_t store_eeprom_delay(struct device *d,
1594 struct device_attribute *attr,
1595 const char *buf, size_t count)
1597 struct ipw_priv *p = dev_get_drvdata(d);
1598 sscanf(buf, "%i", &p->eeprom_delay);
1599 return strnlen(buf, count);
1602 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1603 show_eeprom_delay, store_eeprom_delay);
1605 static ssize_t show_command_event_reg(struct device *d,
1606 struct device_attribute *attr, char *buf)
1608 u32 reg = 0;
1609 struct ipw_priv *p = dev_get_drvdata(d);
1611 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1612 return sprintf(buf, "0x%08x\n", reg);
1614 static ssize_t store_command_event_reg(struct device *d,
1615 struct device_attribute *attr,
1616 const char *buf, size_t count)
1618 u32 reg;
1619 struct ipw_priv *p = dev_get_drvdata(d);
1621 sscanf(buf, "%x", &reg);
1622 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1623 return strnlen(buf, count);
1626 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1627 show_command_event_reg, store_command_event_reg);
1629 static ssize_t show_mem_gpio_reg(struct device *d,
1630 struct device_attribute *attr, char *buf)
1632 u32 reg = 0;
1633 struct ipw_priv *p = dev_get_drvdata(d);
1635 reg = ipw_read_reg32(p, 0x301100);
1636 return sprintf(buf, "0x%08x\n", reg);
1638 static ssize_t store_mem_gpio_reg(struct device *d,
1639 struct device_attribute *attr,
1640 const char *buf, size_t count)
1642 u32 reg;
1643 struct ipw_priv *p = dev_get_drvdata(d);
1645 sscanf(buf, "%x", &reg);
1646 ipw_write_reg32(p, 0x301100, reg);
1647 return strnlen(buf, count);
1650 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1651 show_mem_gpio_reg, store_mem_gpio_reg);
1653 static ssize_t show_indirect_dword(struct device *d,
1654 struct device_attribute *attr, char *buf)
1656 u32 reg = 0;
1657 struct ipw_priv *priv = dev_get_drvdata(d);
1659 if (priv->status & STATUS_INDIRECT_DWORD)
1660 reg = ipw_read_reg32(priv, priv->indirect_dword);
1661 else
1662 reg = 0;
1664 return sprintf(buf, "0x%08x\n", reg);
1666 static ssize_t store_indirect_dword(struct device *d,
1667 struct device_attribute *attr,
1668 const char *buf, size_t count)
1670 struct ipw_priv *priv = dev_get_drvdata(d);
1672 sscanf(buf, "%x", &priv->indirect_dword);
1673 priv->status |= STATUS_INDIRECT_DWORD;
1674 return strnlen(buf, count);
1677 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1678 show_indirect_dword, store_indirect_dword);
1680 static ssize_t show_indirect_byte(struct device *d,
1681 struct device_attribute *attr, char *buf)
1683 u8 reg = 0;
1684 struct ipw_priv *priv = dev_get_drvdata(d);
1686 if (priv->status & STATUS_INDIRECT_BYTE)
1687 reg = ipw_read_reg8(priv, priv->indirect_byte);
1688 else
1689 reg = 0;
1691 return sprintf(buf, "0x%02x\n", reg);
1693 static ssize_t store_indirect_byte(struct device *d,
1694 struct device_attribute *attr,
1695 const char *buf, size_t count)
1697 struct ipw_priv *priv = dev_get_drvdata(d);
1699 sscanf(buf, "%x", &priv->indirect_byte);
1700 priv->status |= STATUS_INDIRECT_BYTE;
1701 return strnlen(buf, count);
1704 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1705 show_indirect_byte, store_indirect_byte);
1707 static ssize_t show_direct_dword(struct device *d,
1708 struct device_attribute *attr, char *buf)
1710 u32 reg = 0;
1711 struct ipw_priv *priv = dev_get_drvdata(d);
1713 if (priv->status & STATUS_DIRECT_DWORD)
1714 reg = ipw_read32(priv, priv->direct_dword);
1715 else
1716 reg = 0;
1718 return sprintf(buf, "0x%08x\n", reg);
1720 static ssize_t store_direct_dword(struct device *d,
1721 struct device_attribute *attr,
1722 const char *buf, size_t count)
1724 struct ipw_priv *priv = dev_get_drvdata(d);
1726 sscanf(buf, "%x", &priv->direct_dword);
1727 priv->status |= STATUS_DIRECT_DWORD;
1728 return strnlen(buf, count);
1731 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1732 show_direct_dword, store_direct_dword);
1734 static int rf_kill_active(struct ipw_priv *priv)
1736 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1737 priv->status |= STATUS_RF_KILL_HW;
1738 else
1739 priv->status &= ~STATUS_RF_KILL_HW;
1741 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1744 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1745 char *buf)
1747 /* 0 - RF kill not enabled
1748 1 - SW based RF kill active (sysfs)
1749 2 - HW based RF kill active
1750 3 - Both HW and SW based RF kill active */
1751 struct ipw_priv *priv = dev_get_drvdata(d);
1752 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1753 (rf_kill_active(priv) ? 0x2 : 0x0);
1754 return sprintf(buf, "%i\n", val);
1757 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1759 if ((disable_radio ? 1 : 0) ==
1760 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1761 return 0;
1763 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1764 disable_radio ? "OFF" : "ON");
1766 if (disable_radio) {
1767 priv->status |= STATUS_RF_KILL_SW;
1769 if (priv->workqueue) {
1770 cancel_delayed_work(&priv->request_scan);
1771 cancel_delayed_work(&priv->request_direct_scan);
1772 cancel_delayed_work(&priv->request_passive_scan);
1773 cancel_delayed_work(&priv->scan_event);
1775 queue_work(priv->workqueue, &priv->down);
1776 } else {
1777 priv->status &= ~STATUS_RF_KILL_SW;
1778 if (rf_kill_active(priv)) {
1779 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1780 "disabled by HW switch\n");
1781 /* Make sure the RF_KILL check timer is running */
1782 cancel_delayed_work(&priv->rf_kill);
1783 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1784 round_jiffies_relative(2 * HZ));
1785 } else
1786 queue_work(priv->workqueue, &priv->up);
1789 return 1;
1792 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1793 const char *buf, size_t count)
1795 struct ipw_priv *priv = dev_get_drvdata(d);
1797 ipw_radio_kill_sw(priv, buf[0] == '1');
1799 return count;
1802 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1804 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1805 char *buf)
1807 struct ipw_priv *priv = dev_get_drvdata(d);
1808 int pos = 0, len = 0;
1809 if (priv->config & CFG_SPEED_SCAN) {
1810 while (priv->speed_scan[pos] != 0)
1811 len += sprintf(&buf[len], "%d ",
1812 priv->speed_scan[pos++]);
1813 return len + sprintf(&buf[len], "\n");
1816 return sprintf(buf, "0\n");
1819 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1820 const char *buf, size_t count)
1822 struct ipw_priv *priv = dev_get_drvdata(d);
1823 int channel, pos = 0;
1824 const char *p = buf;
1826 /* list of space separated channels to scan, optionally ending with 0 */
1827 while ((channel = simple_strtol(p, NULL, 0))) {
1828 if (pos == MAX_SPEED_SCAN - 1) {
1829 priv->speed_scan[pos] = 0;
1830 break;
1833 if (ieee80211_is_valid_channel(priv->ieee, channel))
1834 priv->speed_scan[pos++] = channel;
1835 else
1836 IPW_WARNING("Skipping invalid channel request: %d\n",
1837 channel);
1838 p = strchr(p, ' ');
1839 if (!p)
1840 break;
1841 while (*p == ' ' || *p == '\t')
1842 p++;
1845 if (pos == 0)
1846 priv->config &= ~CFG_SPEED_SCAN;
1847 else {
1848 priv->speed_scan_pos = 0;
1849 priv->config |= CFG_SPEED_SCAN;
1852 return count;
1855 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1856 store_speed_scan);
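/* Usage sketch (illustrative): writing a space separated channel list such as
 * "1 6 11" to the speed_scan attribute restricts subsequent scans to those
 * channels (invalid channels are skipped with a warning), while writing "0"
 * clears the list and turns CFG_SPEED_SCAN back off. */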
1858 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1859 char *buf)
1861 struct ipw_priv *priv = dev_get_drvdata(d);
1862 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1865 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1866 const char *buf, size_t count)
1868 struct ipw_priv *priv = dev_get_drvdata(d);
1869 if (buf[0] == '1')
1870 priv->config |= CFG_NET_STATS;
1871 else
1872 priv->config &= ~CFG_NET_STATS;
1874 return count;
1877 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1878 show_net_stats, store_net_stats);
1880 static ssize_t show_channels(struct device *d,
1881 struct device_attribute *attr,
1882 char *buf)
1884 struct ipw_priv *priv = dev_get_drvdata(d);
1885 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1886 int len = 0, i;
1888 len = sprintf(&buf[len],
1889 "Displaying %d channels in 2.4Ghz band "
1890 "(802.11bg):\n", geo->bg_channels);
1892 for (i = 0; i < geo->bg_channels; i++) {
1893 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1894 geo->bg[i].channel,
1895 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1896 " (radar spectrum)" : "",
1897 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1898 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1899 ? "" : ", IBSS",
1900 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1901 "passive only" : "active/passive",
1902 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1903 "B" : "B/G");
1906 len += sprintf(&buf[len],
1907 "Displaying %d channels in 5.2Ghz band "
1908 "(802.11a):\n", geo->a_channels);
1909 for (i = 0; i < geo->a_channels; i++) {
1910 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1911 geo->a[i].channel,
1912 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1913 " (radar spectrum)" : "",
1914 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1915 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1916 ? "" : ", IBSS",
1917 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1918 "passive only" : "active/passive");
1921 return len;
1924 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1926 static void notify_wx_assoc_event(struct ipw_priv *priv)
1928 union iwreq_data wrqu;
1929 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1930 if (priv->status & STATUS_ASSOCIATED)
1931 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1932 else
1933 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1934 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1937 static void ipw_irq_tasklet(struct ipw_priv *priv)
1939 u32 inta, inta_mask, handled = 0;
1940 unsigned long flags;
1941 int rc = 0;
1943 spin_lock_irqsave(&priv->irq_lock, flags);
1945 inta = ipw_read32(priv, IPW_INTA_RW);
1946 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1947 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1949 /* Add any cached INTA values that need to be handled */
1950 inta |= priv->isr_inta;
1952 spin_unlock_irqrestore(&priv->irq_lock, flags);
1954 spin_lock_irqsave(&priv->lock, flags);
1956 /* handle each of the reasons for the interrupt */
1957 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1958 ipw_rx(priv);
1959 handled |= IPW_INTA_BIT_RX_TRANSFER;
1962 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1963 IPW_DEBUG_HC("Command completed.\n");
1964 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1965 priv->status &= ~STATUS_HCMD_ACTIVE;
1966 wake_up_interruptible(&priv->wait_command_queue);
1967 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1970 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1971 IPW_DEBUG_TX("TX_QUEUE_1\n");
1972 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1973 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1976 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1977 IPW_DEBUG_TX("TX_QUEUE_2\n");
1978 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1979 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1982 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1983 IPW_DEBUG_TX("TX_QUEUE_3\n");
1984 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1985 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1988 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1989 IPW_DEBUG_TX("TX_QUEUE_4\n");
1990 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1991 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1994 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1995 IPW_WARNING("STATUS_CHANGE\n");
1996 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1999 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2000 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2001 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2004 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2005 IPW_WARNING("HOST_CMD_DONE\n");
2006 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2009 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2010 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2011 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2014 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2015 IPW_WARNING("PHY_OFF_DONE\n");
2016 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2019 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2020 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2021 priv->status |= STATUS_RF_KILL_HW;
2022 wake_up_interruptible(&priv->wait_command_queue);
2023 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2024 cancel_delayed_work(&priv->request_scan);
2025 cancel_delayed_work(&priv->request_direct_scan);
2026 cancel_delayed_work(&priv->request_passive_scan);
2027 cancel_delayed_work(&priv->scan_event);
2028 schedule_work(&priv->link_down);
2029 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2030 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2033 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2034 IPW_WARNING("Firmware error detected. Restarting.\n");
2035 if (priv->error) {
2036 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2037 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2038 struct ipw_fw_error *error =
2039 ipw_alloc_error_log(priv);
2040 ipw_dump_error_log(priv, error);
2041 kfree(error);
2043 } else {
2044 priv->error = ipw_alloc_error_log(priv);
2045 if (priv->error)
2046 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2047 else
2048 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2049 "log.\n");
2050 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2051 ipw_dump_error_log(priv, priv->error);
2054 /* XXX: If hardware encryption is for WPA/WPA2,
2055 * we have to notify the supplicant. */
2056 if (priv->ieee->sec.encrypt) {
2057 priv->status &= ~STATUS_ASSOCIATED;
2058 notify_wx_assoc_event(priv);
2061 /* Keep the restart process from trying to send host
2062 * commands by clearing the INIT status bit */
2063 priv->status &= ~STATUS_INIT;
2065 /* Cancel currently queued command. */
2066 priv->status &= ~STATUS_HCMD_ACTIVE;
2067 wake_up_interruptible(&priv->wait_command_queue);
2069 queue_work(priv->workqueue, &priv->adapter_restart);
2070 handled |= IPW_INTA_BIT_FATAL_ERROR;
2073 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2074 IPW_ERROR("Parity error\n");
2075 handled |= IPW_INTA_BIT_PARITY_ERROR;
2078 if (handled != inta) {
2079 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2082 spin_unlock_irqrestore(&priv->lock, flags);
2084 /* enable all interrupts */
2085 ipw_enable_interrupts(priv);
2088 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
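/* IPW_CMD() relies on token pasting and stringification; IPW_CMD(SSID),
 * for example, expands to
 *	case IPW_CMD_SSID : return "SSID";
 * which keeps the long table below free of copy-and-paste mistakes. */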
2089 static char *get_cmd_string(u8 cmd)
2091 switch (cmd) {
2092 IPW_CMD(HOST_COMPLETE);
2093 IPW_CMD(POWER_DOWN);
2094 IPW_CMD(SYSTEM_CONFIG);
2095 IPW_CMD(MULTICAST_ADDRESS);
2096 IPW_CMD(SSID);
2097 IPW_CMD(ADAPTER_ADDRESS);
2098 IPW_CMD(PORT_TYPE);
2099 IPW_CMD(RTS_THRESHOLD);
2100 IPW_CMD(FRAG_THRESHOLD);
2101 IPW_CMD(POWER_MODE);
2102 IPW_CMD(WEP_KEY);
2103 IPW_CMD(TGI_TX_KEY);
2104 IPW_CMD(SCAN_REQUEST);
2105 IPW_CMD(SCAN_REQUEST_EXT);
2106 IPW_CMD(ASSOCIATE);
2107 IPW_CMD(SUPPORTED_RATES);
2108 IPW_CMD(SCAN_ABORT);
2109 IPW_CMD(TX_FLUSH);
2110 IPW_CMD(QOS_PARAMETERS);
2111 IPW_CMD(DINO_CONFIG);
2112 IPW_CMD(RSN_CAPABILITIES);
2113 IPW_CMD(RX_KEY);
2114 IPW_CMD(CARD_DISABLE);
2115 IPW_CMD(SEED_NUMBER);
2116 IPW_CMD(TX_POWER);
2117 IPW_CMD(COUNTRY_INFO);
2118 IPW_CMD(AIRONET_INFO);
2119 IPW_CMD(AP_TX_POWER);
2120 IPW_CMD(CCKM_INFO);
2121 IPW_CMD(CCX_VER_INFO);
2122 IPW_CMD(SET_CALIBRATION);
2123 IPW_CMD(SENSITIVITY_CALIB);
2124 IPW_CMD(RETRY_LIMIT);
2125 IPW_CMD(IPW_PRE_POWER_DOWN);
2126 IPW_CMD(VAP_BEACON_TEMPLATE);
2127 IPW_CMD(VAP_DTIM_PERIOD);
2128 IPW_CMD(EXT_SUPPORTED_RATES);
2129 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2130 IPW_CMD(VAP_QUIET_INTERVALS);
2131 IPW_CMD(VAP_CHANNEL_SWITCH);
2132 IPW_CMD(VAP_MANDATORY_CHANNELS);
2133 IPW_CMD(VAP_CELL_PWR_LIMIT);
2134 IPW_CMD(VAP_CF_PARAM_SET);
2135 IPW_CMD(VAP_SET_BEACONING_STATE);
2136 IPW_CMD(MEASUREMENT);
2137 IPW_CMD(POWER_CAPABILITY);
2138 IPW_CMD(SUPPORTED_CHANNELS);
2139 IPW_CMD(TPC_REPORT);
2140 IPW_CMD(WME_INFO);
2141 IPW_CMD(PRODUCTION_COMMAND);
2142 default:
2143 return "UNKNOWN";
2147 #define HOST_COMPLETE_TIMEOUT HZ
2149 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2151 int rc = 0;
2152 unsigned long flags;
2154 spin_lock_irqsave(&priv->lock, flags);
2155 if (priv->status & STATUS_HCMD_ACTIVE) {
2156 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2157 get_cmd_string(cmd->cmd));
2158 spin_unlock_irqrestore(&priv->lock, flags);
2159 return -EAGAIN;
2162 priv->status |= STATUS_HCMD_ACTIVE;
2164 if (priv->cmdlog) {
2165 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2166 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2167 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2168 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2169 cmd->len);
2170 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2173 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2174 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2175 priv->status);
2177 #ifndef DEBUG_CMD_WEP_KEY
2178 if (cmd->cmd == IPW_CMD_WEP_KEY)
2179 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2180 else
2181 #endif
2182 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2184 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2185 if (rc) {
2186 priv->status &= ~STATUS_HCMD_ACTIVE;
2187 IPW_ERROR("Failed to send %s: Reason %d\n",
2188 get_cmd_string(cmd->cmd), rc);
2189 spin_unlock_irqrestore(&priv->lock, flags);
2190 goto exit;
2192 spin_unlock_irqrestore(&priv->lock, flags);
2194 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2195 !(priv->status & STATUS_HCMD_ACTIVE),
2197 HOST_COMPLETE_TIMEOUT);
2198 if (rc == 0) {
2199 spin_lock_irqsave(&priv->lock, flags);
2200 if (priv->status & STATUS_HCMD_ACTIVE) {
2201 IPW_ERROR("Failed to send %s: Command timed out.\n",
2202 get_cmd_string(cmd->cmd));
2203 priv->status &= ~STATUS_HCMD_ACTIVE;
2204 spin_unlock_irqrestore(&priv->lock, flags);
2205 rc = -EIO;
2206 goto exit;
2208 spin_unlock_irqrestore(&priv->lock, flags);
2209 } else
2210 rc = 0;
2212 if (priv->status & STATUS_RF_KILL_HW) {
2213 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2214 get_cmd_string(cmd->cmd));
2215 rc = -EIO;
2216 goto exit;
2219 exit:
2220 if (priv->cmdlog) {
2221 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2222 priv->cmdlog_pos %= priv->cmdlog_len;
2224 return rc;
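/* A rough sketch of the life of a host command (assuming no RF-kill or
 * firmware error): __ipw_send_cmd() marks STATUS_HCMD_ACTIVE, records the
 * command in the cmdlog ring (cmdlog_pos wraps modulo cmdlog_len), queues
 * it on the command Tx queue and then sleeps for up to HOST_COMPLETE_TIMEOUT
 * (HZ jiffies, i.e. one second); the TX_CMD_QUEUE interrupt handler above
 * clears STATUS_HCMD_ACTIVE and wakes wait_command_queue. */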
2227 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2229 struct host_cmd cmd = {
2230 .cmd = command,
2233 return __ipw_send_cmd(priv, &cmd);
2236 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2237 void *data)
2239 struct host_cmd cmd = {
2240 .cmd = command,
2241 .len = len,
2242 .param = data,
2245 return __ipw_send_cmd(priv, &cmd);
2248 static int ipw_send_host_complete(struct ipw_priv *priv)
2250 if (!priv) {
2251 IPW_ERROR("Invalid args\n");
2252 return -1;
2255 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2258 static int ipw_send_system_config(struct ipw_priv *priv)
2260 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2261 sizeof(priv->sys_config),
2262 &priv->sys_config);
2265 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2267 if (!priv || !ssid) {
2268 IPW_ERROR("Invalid args\n");
2269 return -1;
2272 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2273 ssid);
2276 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2278 if (!priv || !mac) {
2279 IPW_ERROR("Invalid args\n");
2280 return -1;
2283 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2284 priv->net_dev->name, mac);
2286 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2290 * NOTE: This must be executed from our workqueue as it results in udelay
2291 * being called, which may corrupt the keyboard if executed on the default
2292 * workqueue.
2294 static void ipw_adapter_restart(void *adapter)
2296 struct ipw_priv *priv = adapter;
2298 if (priv->status & STATUS_RF_KILL_MASK)
2299 return;
2301 ipw_down(priv);
2303 if (priv->assoc_network &&
2304 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2305 ipw_remove_current_network(priv);
2307 if (ipw_up(priv)) {
2308 IPW_ERROR("Failed to up device\n");
2309 return;
2313 static void ipw_bg_adapter_restart(struct work_struct *work)
2315 struct ipw_priv *priv =
2316 container_of(work, struct ipw_priv, adapter_restart);
2317 mutex_lock(&priv->mutex);
2318 ipw_adapter_restart(priv);
2319 mutex_unlock(&priv->mutex);
2322 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2324 static void ipw_scan_check(void *data)
2326 struct ipw_priv *priv = data;
2327 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2328 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2329 "adapter after (%dms).\n",
2330 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2331 queue_work(priv->workqueue, &priv->adapter_restart);
2335 static void ipw_bg_scan_check(struct work_struct *work)
2337 struct ipw_priv *priv =
2338 container_of(work, struct ipw_priv, scan_check.work);
2339 mutex_lock(&priv->mutex);
2340 ipw_scan_check(priv);
2341 mutex_unlock(&priv->mutex);
2344 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2345 struct ipw_scan_request_ext *request)
2347 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2348 sizeof(*request), request);
2351 static int ipw_send_scan_abort(struct ipw_priv *priv)
2353 if (!priv) {
2354 IPW_ERROR("Invalid args\n");
2355 return -1;
2358 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2361 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2363 struct ipw_sensitivity_calib calib = {
2364 .beacon_rssi_raw = cpu_to_le16(sens),
2367 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2368 &calib);
2371 static int ipw_send_associate(struct ipw_priv *priv,
2372 struct ipw_associate *associate)
2374 if (!priv || !associate) {
2375 IPW_ERROR("Invalid args\n");
2376 return -1;
2379 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2380 associate);
2383 static int ipw_send_supported_rates(struct ipw_priv *priv,
2384 struct ipw_supported_rates *rates)
2386 if (!priv || !rates) {
2387 IPW_ERROR("Invalid args\n");
2388 return -1;
2391 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2392 rates);
2395 static int ipw_set_random_seed(struct ipw_priv *priv)
2397 u32 val;
2399 if (!priv) {
2400 IPW_ERROR("Invalid args\n");
2401 return -1;
2404 get_random_bytes(&val, sizeof(val));
2406 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2409 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2411 __le32 v = cpu_to_le32(phy_off);
2412 if (!priv) {
2413 IPW_ERROR("Invalid args\n");
2414 return -1;
2417 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2420 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2422 if (!priv || !power) {
2423 IPW_ERROR("Invalid args\n");
2424 return -1;
2427 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2430 static int ipw_set_tx_power(struct ipw_priv *priv)
2432 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2433 struct ipw_tx_power tx_power;
2434 s8 max_power;
2435 int i;
2437 memset(&tx_power, 0, sizeof(tx_power));
2439 /* configure device for 'G' band */
2440 tx_power.ieee_mode = IPW_G_MODE;
2441 tx_power.num_channels = geo->bg_channels;
2442 for (i = 0; i < geo->bg_channels; i++) {
2443 max_power = geo->bg[i].max_power;
2444 tx_power.channels_tx_power[i].channel_number =
2445 geo->bg[i].channel;
2446 tx_power.channels_tx_power[i].tx_power = max_power ?
2447 min(max_power, priv->tx_power) : priv->tx_power;
2449 if (ipw_send_tx_power(priv, &tx_power))
2450 return -EIO;
2452 /* configure device to also handle 'B' band */
2453 tx_power.ieee_mode = IPW_B_MODE;
2454 if (ipw_send_tx_power(priv, &tx_power))
2455 return -EIO;
2457 /* configure device to also handle 'A' band */
2458 if (priv->ieee->abg_true) {
2459 tx_power.ieee_mode = IPW_A_MODE;
2460 tx_power.num_channels = geo->a_channels;
2461 for (i = 0; i < tx_power.num_channels; i++) {
2462 max_power = geo->a[i].max_power;
2463 tx_power.channels_tx_power[i].channel_number =
2464 geo->a[i].channel;
2465 tx_power.channels_tx_power[i].tx_power = max_power ?
2466 min(max_power, priv->tx_power) : priv->tx_power;
2468 if (ipw_send_tx_power(priv, &tx_power))
2469 return -EIO;
2471 return 0;
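/* Illustrative example (the numbers are made up): if the geo table reports
 * max_power = 14 for a channel and the user requested priv->tx_power = 20,
 * the channel is programmed with min(14, 20) = 14; a max_power of 0 means
 * no regulatory limit is known and the user value is sent unchanged. */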
2474 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2476 struct ipw_rts_threshold rts_threshold = {
2477 .rts_threshold = cpu_to_le16(rts),
2480 if (!priv) {
2481 IPW_ERROR("Invalid args\n");
2482 return -1;
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2486 sizeof(rts_threshold), &rts_threshold);
2489 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2491 struct ipw_frag_threshold frag_threshold = {
2492 .frag_threshold = cpu_to_le16(frag),
2495 if (!priv) {
2496 IPW_ERROR("Invalid args\n");
2497 return -1;
2500 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2501 sizeof(frag_threshold), &frag_threshold);
2504 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2506 __le32 param;
2508 if (!priv) {
2509 IPW_ERROR("Invalid args\n");
2510 return -1;
2513 /* If on battery, set to 3, if AC set to CAM, else user
2514 * level */
2515 switch (mode) {
2516 case IPW_POWER_BATTERY:
2517 param = cpu_to_le32(IPW_POWER_INDEX_3);
2518 break;
2519 case IPW_POWER_AC:
2520 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2521 break;
2522 default:
2523 param = cpu_to_le32(mode);
2524 break;
2527 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2528 &param);
2531 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2533 struct ipw_retry_limit retry_limit = {
2534 .short_retry_limit = slimit,
2535 .long_retry_limit = llimit
2538 if (!priv) {
2539 IPW_ERROR("Invalid args\n");
2540 return -1;
2543 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2544 &retry_limit);
2548 * The IPW device contains a Microwire compatible EEPROM that stores
2549 * various data like the MAC address. Usually the firmware has exclusive
2550 * access to the eeprom, but during device initialization (before the
2551 * device driver has sent the HostComplete command to the firmware) the
2552 * device driver has read access to the EEPROM by way of indirect addressing
2553 * through a couple of memory mapped registers.
2555 * The following is a simplified implementation for pulling data out of
2556 * the eeprom, along with some helper functions to find information in
2557 * the per device private data's copy of the eeprom.
2559 * NOTE: To better understand how these functions work (i.e. what is a chip
2560 * select and why do we have to keep driving the eeprom clock?), read
2561 * just about any data sheet for a Microwire compatible EEPROM.
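/* A READ transaction, as implemented by eeprom_op() and eeprom_read_u16()
 * below, looks roughly like this on the Microwire bus: assert CS, clock out
 * a start bit of 1, the two opcode bits of EEPROM_CMD_READ and 8 address
 * bits (MSB first), send one dummy bit, then clock 16 data bits out of the
 * part on DO, MSB first. Every bus transition is produced by writing the
 * CS/SK/DI bit pattern to FW_MEM_REG_EEPROM_ACCESS and then waiting
 * eeprom_delay microseconds. */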
2564 /* write a 32 bit value into the indirect accessor register */
2565 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2567 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2569 /* the eeprom requires some time to complete the operation */
2570 udelay(p->eeprom_delay);
2572 return;
2575 /* perform a chip select operation */
2576 static void eeprom_cs(struct ipw_priv *priv)
2578 eeprom_write_reg(priv, 0);
2579 eeprom_write_reg(priv, EEPROM_BIT_CS);
2580 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2581 eeprom_write_reg(priv, EEPROM_BIT_CS);
2584 /* deassert the chip select */
2585 static void eeprom_disable_cs(struct ipw_priv *priv)
2587 eeprom_write_reg(priv, EEPROM_BIT_CS);
2588 eeprom_write_reg(priv, 0);
2589 eeprom_write_reg(priv, EEPROM_BIT_SK);
2592 /* push a single bit down to the eeprom */
2593 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2595 int d = (bit ? EEPROM_BIT_DI : 0);
2596 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2597 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2600 /* push an opcode followed by an address down to the eeprom */
2601 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2603 int i;
2605 eeprom_cs(priv);
2606 eeprom_write_bit(priv, 1);
2607 eeprom_write_bit(priv, op & 2);
2608 eeprom_write_bit(priv, op & 1);
2609 for (i = 7; i >= 0; i--) {
2610 eeprom_write_bit(priv, addr & (1 << i));
2614 /* pull 16 bits off the eeprom, one bit at a time */
2615 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2617 int i;
2618 u16 r = 0;
2620 /* Send READ Opcode */
2621 eeprom_op(priv, EEPROM_CMD_READ, addr);
2623 /* Send dummy bit */
2624 eeprom_write_reg(priv, EEPROM_BIT_CS);
2626 /* Read the 16-bit word off the eeprom one bit at a time */
2627 for (i = 0; i < 16; i++) {
2628 u32 data = 0;
2629 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2630 eeprom_write_reg(priv, EEPROM_BIT_CS);
2631 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2632 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2635 /* Send another dummy bit */
2636 eeprom_write_reg(priv, 0);
2637 eeprom_disable_cs(priv);
2639 return r;
2642 /* helper function for pulling the mac address out of the private */
2643 /* data's copy of the eeprom data */
2644 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2646 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2650 * Either the device driver (i.e. the host) or the firmware can
2651 * load eeprom data into the designated region in SRAM. If neither
2652 * happens then the FW will shutdown with a fatal error.
2654 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2655 * word in the shared SRAM region needs to be set to a non-zero value.
2657 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2659 int i;
2660 __le16 *eeprom = (__le16 *) priv->eeprom;
2662 IPW_DEBUG_TRACE(">>\n");
2664 /* read entire contents of eeprom into private buffer */
2665 for (i = 0; i < 128; i++)
2666 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2669 If the data looks correct, then copy it to our private
2670 copy. Otherwise let the firmware know to perform the operation
2671 on its own.
2673 if (priv->eeprom[EEPROM_VERSION] != 0) {
2674 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2676 /* write the eeprom data to sram */
2677 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2678 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2680 /* Do not load eeprom data on fatal error or suspend */
2681 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2682 } else {
2683 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2685 /* Load eeprom data on fatal error or suspend */
2686 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2689 IPW_DEBUG_TRACE("<<\n");
2692 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2694 count >>= 2;
2695 if (!count)
2696 return;
2697 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2698 while (count--)
2699 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2702 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2704 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2705 CB_NUMBER_OF_ELEMENTS_SMALL *
2706 sizeof(struct command_block));
2709 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2710 { /* start dma engine but no transfers yet */
2712 IPW_DEBUG_FW(">> : \n");
2714 /* Start the dma */
2715 ipw_fw_dma_reset_command_blocks(priv);
2717 /* Write CB base address */
2718 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2720 IPW_DEBUG_FW("<< : \n");
2721 return 0;
2724 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2726 u32 control = 0;
2728 IPW_DEBUG_FW(">> :\n");
2730 /* set the Stop and Abort bit */
2731 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2732 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2733 priv->sram_desc.last_cb_index = 0;
2735 IPW_DEBUG_FW("<< \n");
2738 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2739 struct command_block *cb)
2741 u32 address =
2742 IPW_SHARED_SRAM_DMA_CONTROL +
2743 (sizeof(struct command_block) * index);
2744 IPW_DEBUG_FW(">> :\n");
2746 ipw_write_indirect(priv, address, (u8 *) cb,
2747 (int)sizeof(struct command_block));
2749 IPW_DEBUG_FW("<< :\n");
2750 return 0;
2754 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2756 u32 control = 0;
2757 u32 index = 0;
2759 IPW_DEBUG_FW(">> :\n");
2761 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2762 ipw_fw_dma_write_command_block(priv, index,
2763 &priv->sram_desc.cb_list[index]);
2765 /* Enable the DMA in the CSR register */
2766 ipw_clear_bit(priv, IPW_RESET_REG,
2767 IPW_RESET_REG_MASTER_DISABLED |
2768 IPW_RESET_REG_STOP_MASTER);
2770 /* Set the Start bit. */
2771 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2772 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2774 IPW_DEBUG_FW("<< :\n");
2775 return 0;
2778 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2780 u32 address;
2781 u32 register_value = 0;
2782 u32 cb_fields_address = 0;
2784 IPW_DEBUG_FW(">> :\n");
2785 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2786 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2788 /* Read the DMA Control register */
2789 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2790 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2792 /* Print the CB values */
2793 cb_fields_address = address;
2794 register_value = ipw_read_reg32(priv, cb_fields_address);
2795 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2797 cb_fields_address += sizeof(u32);
2798 register_value = ipw_read_reg32(priv, cb_fields_address);
2799 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2801 cb_fields_address += sizeof(u32);
2802 register_value = ipw_read_reg32(priv, cb_fields_address);
2803 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2804 register_value);
2806 cb_fields_address += sizeof(u32);
2807 register_value = ipw_read_reg32(priv, cb_fields_address);
2808 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2810 IPW_DEBUG_FW(">> :\n");
2813 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2815 u32 current_cb_address = 0;
2816 u32 current_cb_index = 0;
2818 IPW_DEBUG_FW("<< :\n");
2819 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2821 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2822 sizeof(struct command_block);
2824 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2825 current_cb_index, current_cb_address);
2827 IPW_DEBUG_FW(">> :\n");
2828 return current_cb_index;
2832 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2833 u32 src_address,
2834 u32 dest_address,
2835 u32 length,
2836 int interrupt_enabled, int is_last)
2839 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2840 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2841 CB_DEST_SIZE_LONG;
2842 struct command_block *cb;
2843 u32 last_cb_element = 0;
2845 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2846 src_address, dest_address, length);
2848 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2849 return -1;
2851 last_cb_element = priv->sram_desc.last_cb_index;
2852 cb = &priv->sram_desc.cb_list[last_cb_element];
2853 priv->sram_desc.last_cb_index++;
2855 /* Calculate the new CB control word */
2856 if (interrupt_enabled)
2857 control |= CB_INT_ENABLED;
2859 if (is_last)
2860 control |= CB_LAST_VALID;
2862 control |= length;
2864 /* Calculate the CB Element's checksum value */
2865 cb->status = control ^ src_address ^ dest_address;
2867 /* Copy the Source and Destination addresses */
2868 cb->dest_addr = dest_address;
2869 cb->source_addr = src_address;
2871 /* Copy the Control Word last */
2872 cb->control = control;
2874 return 0;
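/* A note on the command block layout (inferred from the code above, not from
 * hardware documentation): the control word carries the flag bits plus the
 * transfer length in its low bits, and the status word is seeded with
 * control ^ source ^ destination, presumably so the DMA engine (or a
 * debugger reading the CB back) can sanity-check the element. */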
2877 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2878 int nr, u32 dest_address, u32 len)
2880 int ret, i;
2881 u32 size;
2883 IPW_DEBUG_FW(">> \n");
2884 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2885 nr, dest_address, len);
2887 for (i = 0; i < nr; i++) {
2888 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2889 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2890 dest_address +
2891 i * CB_MAX_LENGTH, size,
2892 0, 0);
2893 if (ret) {
2894 IPW_DEBUG_FW_INFO(": Failed\n");
2895 return -1;
2896 } else
2897 IPW_DEBUG_FW_INFO(": Added new cb\n");
2900 IPW_DEBUG_FW("<< \n");
2901 return 0;
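/* Worked example: for a chunk of len = 3 * CB_MAX_LENGTH + 100 bytes the
 * caller passes nr = 4, and the loop above emits three command blocks of
 * CB_MAX_LENGTH bytes followed by a final one of 100 bytes, each targeting
 * dest_address + i * CB_MAX_LENGTH. */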
2904 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2906 u32 current_index = 0, previous_index;
2907 u32 watchdog = 0;
2909 IPW_DEBUG_FW(">> : \n");
2911 current_index = ipw_fw_dma_command_block_index(priv);
2912 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2913 (int)priv->sram_desc.last_cb_index);
2915 while (current_index < priv->sram_desc.last_cb_index) {
2916 udelay(50);
2917 previous_index = current_index;
2918 current_index = ipw_fw_dma_command_block_index(priv);
2920 if (previous_index < current_index) {
2921 watchdog = 0;
2922 continue;
2924 if (++watchdog > 400) {
2925 IPW_DEBUG_FW_INFO("Timeout\n");
2926 ipw_fw_dma_dump_command_block(priv);
2927 ipw_fw_dma_abort(priv);
2928 return -1;
2932 ipw_fw_dma_abort(priv);
2934 /* Disable the DMA in the CSR register */
2935 ipw_set_bit(priv, IPW_RESET_REG,
2936 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2938 IPW_DEBUG_FW("<< dmaWaitSync \n");
2939 return 0;
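/* The polling above gives up after 400 consecutive 50-microsecond waits
 * with no forward progress of the current-CB index, i.e. after roughly
 * 20 ms of stall, and then dumps the current command block and aborts
 * the DMA. */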
2942 static void ipw_remove_current_network(struct ipw_priv *priv)
2944 struct list_head *element, *safe;
2945 struct ieee80211_network *network = NULL;
2946 unsigned long flags;
2948 spin_lock_irqsave(&priv->ieee->lock, flags);
2949 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2950 network = list_entry(element, struct ieee80211_network, list);
2951 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2952 list_del(element);
2953 list_add_tail(&network->list,
2954 &priv->ieee->network_free_list);
2957 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2961 * Check that card is still alive.
2962 * Reads debug register from domain0.
2963 * If card is present, pre-defined value should
2964 * be found there.
2966 * @param priv
2967 * @return 1 if card is present, 0 otherwise
2969 static inline int ipw_alive(struct ipw_priv *priv)
2971 return ipw_read32(priv, 0x90) == 0xd55555d5;
2974 /* timeout in msec, attempted in 10-msec quanta */
2975 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2976 int timeout)
2978 int i = 0;
2980 do {
2981 if ((ipw_read32(priv, addr) & mask) == mask)
2982 return i;
2983 mdelay(10);
2984 i += 10;
2985 } while (i < timeout);
2987 return -ETIME;
2990 /* These functions load the firmware and microcode needed to operate
2991 * the ipw hardware. They assume the buffer holds the complete image
2992 * and that the caller handles the memory allocation and clean up.
2995 static int ipw_stop_master(struct ipw_priv *priv)
2997 int rc;
2999 IPW_DEBUG_TRACE(">> \n");
3000 /* stop master. typical delay - 0 */
3001 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3003 /* timeout is in msec, polled in 10-msec quanta */
3004 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3005 IPW_RESET_REG_MASTER_DISABLED, 100);
3006 if (rc < 0) {
3007 IPW_ERROR("wait for stop master failed after 100ms\n");
3008 return -1;
3011 IPW_DEBUG_INFO("stop master %dms\n", rc);
3013 return rc;
3016 static void ipw_arc_release(struct ipw_priv *priv)
3018 IPW_DEBUG_TRACE(">> \n");
3019 mdelay(5);
3021 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3023 /* no one knows timing, for safety add some delay */
3024 mdelay(5);
3027 struct fw_chunk {
3028 __le32 address;
3029 __le32 length;
3032 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3034 int rc = 0, i, addr;
3035 u8 cr = 0;
3036 __le16 *image;
3038 image = (__le16 *) data;
3040 IPW_DEBUG_TRACE(">> \n");
3042 rc = ipw_stop_master(priv);
3044 if (rc < 0)
3045 return rc;
3047 for (addr = IPW_SHARED_LOWER_BOUND;
3048 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3049 ipw_write32(priv, addr, 0);
3052 /* no ucode (yet) */
3053 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3054 /* destroy DMA queues */
3055 /* reset sequence */
3057 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3058 ipw_arc_release(priv);
3059 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3060 mdelay(1);
3062 /* reset PHY */
3063 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3064 mdelay(1);
3066 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3067 mdelay(1);
3069 /* enable ucode store */
3070 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3071 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3072 mdelay(1);
3074 /* write ucode */
3076 * @bug
3077 * Do NOT set indirect address register once and then
3078 * store data to indirect data register in the loop.
3079 * It seems very reasonable, but in this case DINO does not
3080 * accept the ucode. It is essential to set the address each time.
3082 /* load new ipw uCode */
3083 for (i = 0; i < len / 2; i++)
3084 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3085 le16_to_cpu(image[i]));
3087 /* enable DINO */
3088 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3089 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3091 /* this is where the igx / win driver deviates from the VAP driver. */
3093 /* wait for alive response */
3094 for (i = 0; i < 100; i++) {
3095 /* poll for incoming data */
3096 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3097 if (cr & DINO_RXFIFO_DATA)
3098 break;
3099 mdelay(1);
3102 if (cr & DINO_RXFIFO_DATA) {
3103 /* alive_command_response size is NOT a multiple of 4 */
3104 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3106 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3107 response_buffer[i] =
3108 cpu_to_le32(ipw_read_reg32(priv,
3109 IPW_BASEBAND_RX_FIFO_READ));
3110 memcpy(&priv->dino_alive, response_buffer,
3111 sizeof(priv->dino_alive));
3112 if (priv->dino_alive.alive_command == 1
3113 && priv->dino_alive.ucode_valid == 1) {
3114 rc = 0;
3115 IPW_DEBUG_INFO
3116 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3117 "of %02d/%02d/%02d %02d:%02d\n",
3118 priv->dino_alive.software_revision,
3119 priv->dino_alive.software_revision,
3120 priv->dino_alive.device_identifier,
3121 priv->dino_alive.device_identifier,
3122 priv->dino_alive.time_stamp[0],
3123 priv->dino_alive.time_stamp[1],
3124 priv->dino_alive.time_stamp[2],
3125 priv->dino_alive.time_stamp[3],
3126 priv->dino_alive.time_stamp[4]);
3127 } else {
3128 IPW_DEBUG_INFO("Microcode is not alive\n");
3129 rc = -EINVAL;
3131 } else {
3132 IPW_DEBUG_INFO("No alive response from DINO\n");
3133 rc = -ETIME;
3136 /* disable DINO, otherwise for some reason the
3137 firmware has problems getting the alive response. */
3138 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3140 return rc;
3143 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3145 int ret = -1;
3146 int offset = 0;
3147 struct fw_chunk *chunk;
3148 int total_nr = 0;
3149 int i;
3150 struct pci_pool *pool;
3151 u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
3152 dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
3154 IPW_DEBUG_TRACE("<< : \n");
3156 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3157 if (!pool) {
3158 IPW_ERROR("pci_pool_create failed\n");
3159 return -ENOMEM;
3162 /* Start the Dma */
3163 ret = ipw_fw_dma_enable(priv);
3165 /* If the DMA CB list is not empty at this point, it is a bug. */
3166 BUG_ON(priv->sram_desc.last_cb_index > 0);
3168 do {
3169 u32 chunk_len;
3170 u8 *start;
3171 int size;
3172 int nr = 0;
3174 chunk = (struct fw_chunk *)(data + offset);
3175 offset += sizeof(struct fw_chunk);
3176 chunk_len = le32_to_cpu(chunk->length);
3177 start = data + offset;
3179 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3180 for (i = 0; i < nr; i++) {
3181 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3182 &phys[total_nr]);
3183 if (!virts[total_nr]) {
3184 ret = -ENOMEM;
3185 goto out;
3187 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3188 CB_MAX_LENGTH);
3189 memcpy(virts[total_nr], start, size);
3190 start += size;
3191 total_nr++;
3192 /* We don't support fw chunks larger than 64*8K */
3193 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3196 /* build DMA packet and queue up for sending */
3197 /* dma to chunk->address, the chunk->length bytes from data +
3198 * offset */
3199 /* Dma loading */
3200 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3201 nr, le32_to_cpu(chunk->address),
3202 chunk_len);
3203 if (ret) {
3204 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3205 goto out;
3208 offset += chunk_len;
3209 } while (offset < len);
3211 /* Run the DMA and wait for the answer */
3212 ret = ipw_fw_dma_kick(priv);
3213 if (ret) {
3214 IPW_ERROR("dmaKick Failed\n");
3215 goto out;
3218 ret = ipw_fw_dma_wait(priv);
3219 if (ret) {
3220 IPW_ERROR("dmaWaitSync Failed\n");
3221 goto out;
3223 out:
3224 for (i = 0; i < total_nr; i++)
3225 pci_pool_free(pool, virts[i], phys[i]);
3227 pci_pool_destroy(pool);
3229 return ret;
3232 /* stop nic */
3233 static int ipw_stop_nic(struct ipw_priv *priv)
3235 int rc = 0;
3237 /* stop */
3238 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3240 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3241 IPW_RESET_REG_MASTER_DISABLED, 500);
3242 if (rc < 0) {
3243 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3244 return rc;
3247 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3249 return rc;
3252 static void ipw_start_nic(struct ipw_priv *priv)
3254 IPW_DEBUG_TRACE(">>\n");
3256 /* prvHwStartNic release ARC */
3257 ipw_clear_bit(priv, IPW_RESET_REG,
3258 IPW_RESET_REG_MASTER_DISABLED |
3259 IPW_RESET_REG_STOP_MASTER |
3260 CBD_RESET_REG_PRINCETON_RESET);
3262 /* enable power management */
3263 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3264 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3266 IPW_DEBUG_TRACE("<<\n");
3269 static int ipw_init_nic(struct ipw_priv *priv)
3271 int rc;
3273 IPW_DEBUG_TRACE(">>\n");
3274 /* reset */
3275 /*prvHwInitNic */
3276 /* set "initialization complete" bit to move adapter to D0 state */
3277 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3279 /* low-level PLL activation */
3280 ipw_write32(priv, IPW_READ_INT_REGISTER,
3281 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3283 /* wait for clock stabilization */
3284 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3285 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3286 if (rc < 0)
3287 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3289 /* assert SW reset */
3290 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3292 udelay(10);
3294 /* set "initialization complete" bit to move adapter to D0 state */
3295 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3297 IPW_DEBUG_TRACE(">>\n");
3298 return 0;
3301 /* Call this function from process context, it will sleep in request_firmware.
3302 * Probe is an ok place to call this from.
3304 static int ipw_reset_nic(struct ipw_priv *priv)
3306 int rc = 0;
3307 unsigned long flags;
3309 IPW_DEBUG_TRACE(">>\n");
3311 rc = ipw_init_nic(priv);
3313 spin_lock_irqsave(&priv->lock, flags);
3314 /* Clear the 'host command active' bit... */
3315 priv->status &= ~STATUS_HCMD_ACTIVE;
3316 wake_up_interruptible(&priv->wait_command_queue);
3317 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3318 wake_up_interruptible(&priv->wait_state);
3319 spin_unlock_irqrestore(&priv->lock, flags);
3321 IPW_DEBUG_TRACE("<<\n");
3322 return rc;
3326 struct ipw_fw {
3327 __le32 ver;
3328 __le32 boot_size;
3329 __le32 ucode_size;
3330 __le32 fw_size;
3331 u8 data[0];
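/* On-disk layout of the ipw2200 firmware image, as consumed by ipw_get_fw()
 * and ipw_load() below: the ipw_fw header is followed by three concatenated
 * blobs of boot_size, ucode_size and fw_size bytes. The boot and runtime
 * firmware blobs are sequences of fw_chunk records (address/length header
 * plus payload) that ipw_load_firmware() DMAs into the device, while the
 * ucode blob is a stream of 16-bit words written one at a time by
 * ipw_load_ucode(). */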
3334 static int ipw_get_fw(struct ipw_priv *priv,
3335 const struct firmware **raw, const char *name)
3337 struct ipw_fw *fw;
3338 int rc;
3340 /* ask firmware_class module to get the boot firmware off disk */
3341 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3342 if (rc < 0) {
3343 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3344 return rc;
3347 if ((*raw)->size < sizeof(*fw)) {
3348 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3349 return -EINVAL;
3352 fw = (void *)(*raw)->data;
3354 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3355 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3356 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3357 name, (*raw)->size);
3358 return -EINVAL;
3361 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3362 name,
3363 le32_to_cpu(fw->ver) >> 16,
3364 le32_to_cpu(fw->ver) & 0xff,
3365 (*raw)->size - sizeof(*fw));
3366 return 0;
3369 #define IPW_RX_BUF_SIZE (3000)
3371 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3372 struct ipw_rx_queue *rxq)
3374 unsigned long flags;
3375 int i;
3377 spin_lock_irqsave(&rxq->lock, flags);
3379 INIT_LIST_HEAD(&rxq->rx_free);
3380 INIT_LIST_HEAD(&rxq->rx_used);
3382 /* Fill the rx_used queue with _all_ of the Rx buffers */
3383 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3384 /* In the reset function, these buffers may have been allocated
3385 * to an SKB, so we need to unmap and free potential storage */
3386 if (rxq->pool[i].skb != NULL) {
3387 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3388 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3389 dev_kfree_skb(rxq->pool[i].skb);
3390 rxq->pool[i].skb = NULL;
3392 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3395 /* Set us so that we have processed and used all buffers, but have
3396 * not restocked the Rx queue with fresh buffers */
3397 rxq->read = rxq->write = 0;
3398 rxq->free_count = 0;
3399 spin_unlock_irqrestore(&rxq->lock, flags);
3402 #ifdef CONFIG_PM
3403 static int fw_loaded = 0;
3404 static const struct firmware *raw = NULL;
3406 static void free_firmware(void)
3408 if (fw_loaded) {
3409 release_firmware(raw);
3410 raw = NULL;
3411 fw_loaded = 0;
3414 #else
3415 #define free_firmware() do {} while (0)
3416 #endif
3418 static int ipw_load(struct ipw_priv *priv)
3420 #ifndef CONFIG_PM
3421 const struct firmware *raw = NULL;
3422 #endif
3423 struct ipw_fw *fw;
3424 u8 *boot_img, *ucode_img, *fw_img;
3425 u8 *name = NULL;
3426 int rc = 0, retries = 3;
3428 switch (priv->ieee->iw_mode) {
3429 case IW_MODE_ADHOC:
3430 name = "ipw2200-ibss.fw";
3431 break;
3432 #ifdef CONFIG_IPW2200_MONITOR
3433 case IW_MODE_MONITOR:
3434 name = "ipw2200-sniffer.fw";
3435 break;
3436 #endif
3437 case IW_MODE_INFRA:
3438 name = "ipw2200-bss.fw";
3439 break;
3442 if (!name) {
3443 rc = -EINVAL;
3444 goto error;
3447 #ifdef CONFIG_PM
3448 if (!fw_loaded) {
3449 #endif
3450 rc = ipw_get_fw(priv, &raw, name);
3451 if (rc < 0)
3452 goto error;
3453 #ifdef CONFIG_PM
3455 #endif
3457 fw = (void *)raw->data;
3458 boot_img = &fw->data[0];
3459 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3460 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3461 le32_to_cpu(fw->ucode_size)];
3463 if (rc < 0)
3464 goto error;
3466 if (!priv->rxq)
3467 priv->rxq = ipw_rx_queue_alloc(priv);
3468 else
3469 ipw_rx_queue_reset(priv, priv->rxq);
3470 if (!priv->rxq) {
3471 IPW_ERROR("Unable to initialize Rx queue\n");
3472 goto error;
3475 retry:
3476 /* Ensure interrupts are disabled */
3477 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3478 priv->status &= ~STATUS_INT_ENABLED;
3480 /* ack pending interrupts */
3481 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3483 ipw_stop_nic(priv);
3485 rc = ipw_reset_nic(priv);
3486 if (rc < 0) {
3487 IPW_ERROR("Unable to reset NIC\n");
3488 goto error;
3491 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3492 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3494 /* DMA the initial boot firmware into the device */
3495 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3496 if (rc < 0) {
3497 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3498 goto error;
3501 /* kick start the device */
3502 ipw_start_nic(priv);
3504 /* wait for the device to finish its initial startup sequence */
3505 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3506 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3507 if (rc < 0) {
3508 IPW_ERROR("device failed to boot initial fw image\n");
3509 goto error;
3511 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3513 /* ack fw init done interrupt */
3514 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3516 /* DMA the ucode into the device */
3517 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3518 if (rc < 0) {
3519 IPW_ERROR("Unable to load ucode: %d\n", rc);
3520 goto error;
3523 /* stop nic */
3524 ipw_stop_nic(priv);
3526 /* DMA bss firmware into the device */
3527 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3528 if (rc < 0) {
3529 IPW_ERROR("Unable to load firmware: %d\n", rc);
3530 goto error;
3532 #ifdef CONFIG_PM
3533 fw_loaded = 1;
3534 #endif
3536 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3538 rc = ipw_queue_reset(priv);
3539 if (rc < 0) {
3540 IPW_ERROR("Unable to initialize queues\n");
3541 goto error;
3544 /* Ensure interrupts are disabled */
3545 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3546 /* ack pending interrupts */
3547 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3549 /* kick start the device */
3550 ipw_start_nic(priv);
3552 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3553 if (retries > 0) {
3554 IPW_WARNING("Parity error. Retrying init.\n");
3555 retries--;
3556 goto retry;
3559 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3560 rc = -EIO;
3561 goto error;
3564 /* wait for the device */
3565 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3566 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3567 if (rc < 0) {
3568 IPW_ERROR("device failed to start within 500ms\n");
3569 goto error;
3571 IPW_DEBUG_INFO("device response after %dms\n", rc);
3573 /* ack fw init done interrupt */
3574 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3576 /* read eeprom data and initialize the eeprom region of sram */
3577 priv->eeprom_delay = 1;
3578 ipw_eeprom_init_sram(priv);
3580 /* enable interrupts */
3581 ipw_enable_interrupts(priv);
3583 /* Ensure our queue has valid packets */
3584 ipw_rx_queue_replenish(priv);
3586 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3588 /* ack pending interrupts */
3589 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3591 #ifndef CONFIG_PM
3592 release_firmware(raw);
3593 #endif
3594 return 0;
3596 error:
3597 if (priv->rxq) {
3598 ipw_rx_queue_free(priv, priv->rxq);
3599 priv->rxq = NULL;
3601 ipw_tx_queue_free(priv);
3602 if (raw)
3603 release_firmware(raw);
3604 #ifdef CONFIG_PM
3605 fw_loaded = 0;
3606 raw = NULL;
3607 #endif
3609 return rc;
3613 * DMA services
3615 * Theory of operation
3617 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3618 * Two empty entries are always kept in the buffer to protect against overflow.
3620 * For the Tx queues there are low mark and high mark limits. If, after queuing
3621 * a packet for Tx, the free space drops below the low mark, the Tx queue is
3622 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
3623 * space rises above the high mark, the Tx queue is resumed.
3625 * The IPW operates with six queues, one receive queue in the device's
3626 * sram, one transmit queue for sending commands to the device firmware,
3627 * and four transmit queues for data.
3629 * The four transmit queues allow for performing quality of service (qos)
3630 * transmissions as per the 802.11 protocol. Currently Linux does not
3631 * provide a mechanism to the user for utilizing prioritized queues, so
3632 * we only utilize the first data transmit queue (queue1).
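/* Worked example for ipw_tx_queue_space() below: with n_bd = 64,
 * first_empty = 60 (next slot to fill) and last_used = 5 (next slot to
 * reclaim), s = 5 - 60 = -55, wrapped to -55 + 64 = 9, and after the
 * two-entry reserve 7 slots remain usable. */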
3636 * Driver allocates buffers of this size for Rx
3640 * ipw_rx_queue_space - Return number of free slots available in queue.
3642 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3644 int s = q->read - q->write;
3645 if (s <= 0)
3646 s += RX_QUEUE_SIZE;
3647 /* keep some buffer to not confuse full and empty queue */
3648 s -= 2;
3649 if (s < 0)
3650 s = 0;
3651 return s;
3654 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3656 int s = q->last_used - q->first_empty;
3657 if (s <= 0)
3658 s += q->n_bd;
3659 s -= 2; /* keep some reserve to not confuse empty and full situations */
3660 if (s < 0)
3661 s = 0;
3662 return s;
3665 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3667 return (++index == n_bd) ? 0 : index;
3671 * Initialize common DMA queue structure
3673 * @param q queue to init
3674 * @param count Number of BD's to allocate. Should be power of 2
3675 * @param read_register Address for 'read' register
3676 * (not offset within BAR, full address)
3677 * @param write_register Address for 'write' register
3678 * (not offset within BAR, full address)
3679 * @param base_register Address for 'base' register
3680 * (not offset within BAR, full address)
3681 * @param size Address for 'size' register
3682 * (not offset within BAR, full address)
3684 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3685 int count, u32 read, u32 write, u32 base, u32 size)
3687 q->n_bd = count;
3689 q->low_mark = q->n_bd / 4;
3690 if (q->low_mark < 4)
3691 q->low_mark = 4;
3693 q->high_mark = q->n_bd / 8;
3694 if (q->high_mark < 2)
3695 q->high_mark = 2;
3697 q->first_empty = q->last_used = 0;
3698 q->reg_r = read;
3699 q->reg_w = write;
3701 ipw_write32(priv, base, q->dma_addr);
3702 ipw_write32(priv, size, count);
3703 ipw_write32(priv, read, 0);
3704 ipw_write32(priv, write, 0);
3706 _ipw_read32(priv, 0x90);
3709 static int ipw_queue_tx_init(struct ipw_priv *priv,
3710 struct clx2_tx_queue *q,
3711 int count, u32 read, u32 write, u32 base, u32 size)
3713 struct pci_dev *dev = priv->pci_dev;
3715 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3716 if (!q->txb) {
3717 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3718 return -ENOMEM;
3721 q->bd =
3722 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3723 if (!q->bd) {
3724 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3725 sizeof(q->bd[0]) * count);
3726 kfree(q->txb);
3727 q->txb = NULL;
3728 return -ENOMEM;
3731 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3732 return 0;
3736 * Free one TFD, the one at index [txq->q.last_used].
3737 * Do NOT advance any indexes
3739 * @param dev
3740 * @param txq
3742 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3743 struct clx2_tx_queue *txq)
3745 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3746 struct pci_dev *dev = priv->pci_dev;
3747 int i;
3749 /* classify bd */
3750 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3751 /* nothing to cleanup after for host commands */
3752 return;
3754 /* sanity check */
3755 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3756 IPW_ERROR("Too many chunks: %i\n",
3757 le32_to_cpu(bd->u.data.num_chunks));
3758 /** @todo issue a fatal error, it is quite a serious situation */
3759 return;
3762 /* unmap chunks if any */
3763 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3764 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3765 le16_to_cpu(bd->u.data.chunk_len[i]),
3766 PCI_DMA_TODEVICE);
3767 if (txq->txb[txq->q.last_used]) {
3768 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3769 txq->txb[txq->q.last_used] = NULL;
3775 * Deallocate DMA queue.
3777 * Empty queue by removing and destroying all BD's.
3778 * Free all buffers.
3780 * @param dev
3781 * @param q
3783 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3785 struct clx2_queue *q = &txq->q;
3786 struct pci_dev *dev = priv->pci_dev;
3788 if (q->n_bd == 0)
3789 return;
3791 /* first, empty all BD's */
3792 for (; q->first_empty != q->last_used;
3793 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3794 ipw_queue_tx_free_tfd(priv, txq);
3797 /* free buffers belonging to queue itself */
3798 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3799 q->dma_addr);
3800 kfree(txq->txb);
3802 /* 0 fill whole structure */
3803 memset(txq, 0, sizeof(*txq));
3807 * Destroy all DMA queues and structures
3809 * @param priv
3811 static void ipw_tx_queue_free(struct ipw_priv *priv)
3813 /* Tx CMD queue */
3814 ipw_queue_tx_free(priv, &priv->txq_cmd);
3816 /* Tx queues */
3817 ipw_queue_tx_free(priv, &priv->txq[0]);
3818 ipw_queue_tx_free(priv, &priv->txq[1]);
3819 ipw_queue_tx_free(priv, &priv->txq[2]);
3820 ipw_queue_tx_free(priv, &priv->txq[3]);
3823 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3825 /* First 3 bytes are manufacturer */
3826 bssid[0] = priv->mac_addr[0];
3827 bssid[1] = priv->mac_addr[1];
3828 bssid[2] = priv->mac_addr[2];
3830 /* Last bytes are random */
3831 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3833 bssid[0] &= 0xfe; /* clear multicast bit */
3834 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
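/* Illustrative example (addresses are made up): an adapter MAC of
 * 00:11:22:aa:bb:cc yields an IBSS BSSID of the form 02:11:22:rr:rr:rr,
 * where rr are random bytes -- the first octet has the multicast bit
 * cleared and the locally administered bit set. */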
3837 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3839 struct ipw_station_entry entry;
3840 int i;
3842 for (i = 0; i < priv->num_stations; i++) {
3843 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3844 /* Another node is active in network */
3845 priv->missed_adhoc_beacons = 0;
3846 if (!(priv->config & CFG_STATIC_CHANNEL))
3847 /* when other nodes drop out, we drop out */
3848 priv->config &= ~CFG_ADHOC_PERSIST;
3850 return i;
3854 if (i == MAX_STATIONS)
3855 return IPW_INVALID_STATION;
3857 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3859 entry.reserved = 0;
3860 entry.support_mode = 0;
3861 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3862 memcpy(priv->stations[i], bssid, ETH_ALEN);
3863 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3864 &entry, sizeof(entry));
3865 priv->num_stations++;
3867 return i;
3870 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3872 int i;
3874 for (i = 0; i < priv->num_stations; i++)
3875 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3876 return i;
3878 return IPW_INVALID_STATION;
3881 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3883 int err;
3885 if (priv->status & STATUS_ASSOCIATING) {
3886 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3887 queue_work(priv->workqueue, &priv->disassociate);
3888 return;
3891 if (!(priv->status & STATUS_ASSOCIATED)) {
3892 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3893 return;
3896 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3897 "on channel %d.\n",
3898 priv->assoc_request.bssid,
3899 priv->assoc_request.channel);
3901 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3902 priv->status |= STATUS_DISASSOCIATING;
3904 if (quiet)
3905 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3906 else
3907 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3909 err = ipw_send_associate(priv, &priv->assoc_request);
3910 if (err) {
3911 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3912 "failed.\n");
3913 return;
3918 static int ipw_disassociate(void *data)
3920 struct ipw_priv *priv = data;
3921 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3922 return 0;
3923 ipw_send_disassociate(data, 0);
3924 netif_carrier_off(priv->net_dev);
3925 return 1;
3928 static void ipw_bg_disassociate(struct work_struct *work)
3930 struct ipw_priv *priv =
3931 container_of(work, struct ipw_priv, disassociate);
3932 mutex_lock(&priv->mutex);
3933 ipw_disassociate(priv);
3934 mutex_unlock(&priv->mutex);
3937 static void ipw_system_config(struct work_struct *work)
3939 struct ipw_priv *priv =
3940 container_of(work, struct ipw_priv, system_config);
3942 #ifdef CONFIG_IPW2200_PROMISCUOUS
3943 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3944 priv->sys_config.accept_all_data_frames = 1;
3945 priv->sys_config.accept_non_directed_frames = 1;
3946 priv->sys_config.accept_all_mgmt_bcpr = 1;
3947 priv->sys_config.accept_all_mgmt_frames = 1;
3949 #endif
3951 ipw_send_system_config(priv);
3954 struct ipw_status_code {
3955 u16 status;
3956 const char *reason;
3959 static const struct ipw_status_code ipw_status_codes[] = {
3960 {0x00, "Successful"},
3961 {0x01, "Unspecified failure"},
3962 {0x0A, "Cannot support all requested capabilities in the "
3963 "Capability information field"},
3964 {0x0B, "Reassociation denied due to inability to confirm that "
3965 "association exists"},
3966 {0x0C, "Association denied due to reason outside the scope of this "
3967 "standard"},
3968 {0x0D,
3969 "Responding station does not support the specified authentication "
3970 "algorithm"},
3971 {0x0E,
3972 "Received an Authentication frame with authentication sequence "
3973 "transaction sequence number out of expected sequence"},
3974 {0x0F, "Authentication rejected because of challenge failure"},
3975 {0x10, "Authentication rejected due to timeout waiting for next "
3976 "frame in sequence"},
3977 {0x11, "Association denied because AP is unable to handle additional "
3978 "associated stations"},
3979 {0x12,
3980 "Association denied due to requesting station not supporting all "
3981 "of the datarates in the BSSBasicServiceSet Parameter"},
3982 {0x13,
3983 "Association denied due to requesting station not supporting "
3984 "short preamble operation"},
3985 {0x14,
3986 "Association denied due to requesting station not supporting "
3987 "PBCC encoding"},
3988 {0x15,
3989 "Association denied due to requesting station not supporting "
3990 "channel agility"},
3991 {0x19,
3992 "Association denied due to requesting station not supporting "
3993 "short slot operation"},
3994 {0x1A,
3995 "Association denied due to requesting station not supporting "
3996 "DSSS-OFDM operation"},
3997 {0x28, "Invalid Information Element"},
3998 {0x29, "Group Cipher is not valid"},
3999 {0x2A, "Pairwise Cipher is not valid"},
4000 {0x2B, "AKMP is not valid"},
4001 {0x2C, "Unsupported RSN IE version"},
4002 {0x2D, "Invalid RSN IE Capabilities"},
4003 {0x2E, "Cipher suite is rejected per security policy"},
4006 static const char *ipw_get_status_code(u16 status)
4008 int i;
4009 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4010 if (ipw_status_codes[i].status == (status & 0xff))
4011 return ipw_status_codes[i].reason;
4012 return "Unknown status value.";
4015 static inline void average_init(struct average *avg)
4017 memset(avg, 0, sizeof(*avg));
4020 #define DEPTH_RSSI 8
4021 #define DEPTH_NOISE 16
4022 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4024 return ((depth-1)*prev_avg + val)/depth;
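/* Worked example with DEPTH_RSSI (8): prev_avg = -60, val = -52 gives
 * (7 * -60 + -52) / 8 = -472 / 8 = -59, so each new sample moves the
 * average by roughly 1/8 of the difference. */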
4027 static void average_add(struct average *avg, s16 val)
4029 avg->sum -= avg->entries[avg->pos];
4030 avg->sum += val;
4031 avg->entries[avg->pos++] = val;
4032 if (unlikely(avg->pos == AVG_ENTRIES)) {
4033 avg->init = 1;
4034 avg->pos = 0;
4038 static s16 average_value(struct average *avg)
4040 if (unlikely(!avg->init)) {
4041 if (avg->pos)
4042 return avg->sum / avg->pos;
4043 return 0;
4046 return avg->sum / AVG_ENTRIES;
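/* Until the ring buffer has wrapped at least once (avg->init == 0),
 * average_value() divides by the number of samples actually collected so
 * far rather than by AVG_ENTRIES, so early readings are not biased toward
 * zero. */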
4049 static void ipw_reset_stats(struct ipw_priv *priv)
4051 u32 len = sizeof(u32);
4053 priv->quality = 0;
4055 average_init(&priv->average_missed_beacons);
4056 priv->exp_avg_rssi = -60;
4057 priv->exp_avg_noise = -85 + 0x100;
4059 priv->last_rate = 0;
4060 priv->last_missed_beacons = 0;
4061 priv->last_rx_packets = 0;
4062 priv->last_tx_packets = 0;
4063 priv->last_tx_failures = 0;
4065 /* Firmware managed, reset only when NIC is restarted, so we have to
4066 * normalize on the current value */
4067 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4068 &priv->last_rx_err, &len);
4069 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4070 &priv->last_tx_failures, &len);
4072 /* Driver managed, reset with each association */
4073 priv->missed_adhoc_beacons = 0;
4074 priv->missed_beacons = 0;
4075 priv->tx_packets = 0;
4076 priv->rx_packets = 0;
4080 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4082 u32 i = 0x80000000;
4083 u32 mask = priv->rates_mask;
4084 /* If currently associated in B mode, restrict the maximum
4085 * rate match to B rates */
4086 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4087 mask &= IEEE80211_CCK_RATES_MASK;
4089 /* TODO: Verify that the rate is supported by the current rates
4090 * list. */
4092 while (i && !(mask & i))
4093 i >>= 1;
4094 switch (i) {
4095 case IEEE80211_CCK_RATE_1MB_MASK:
4096 return 1000000;
4097 case IEEE80211_CCK_RATE_2MB_MASK:
4098 return 2000000;
4099 case IEEE80211_CCK_RATE_5MB_MASK:
4100 return 5500000;
4101 case IEEE80211_OFDM_RATE_6MB_MASK:
4102 return 6000000;
4103 case IEEE80211_OFDM_RATE_9MB_MASK:
4104 return 9000000;
4105 case IEEE80211_CCK_RATE_11MB_MASK:
4106 return 11000000;
4107 case IEEE80211_OFDM_RATE_12MB_MASK:
4108 return 12000000;
4109 case IEEE80211_OFDM_RATE_18MB_MASK:
4110 return 18000000;
4111 case IEEE80211_OFDM_RATE_24MB_MASK:
4112 return 24000000;
4113 case IEEE80211_OFDM_RATE_36MB_MASK:
4114 return 36000000;
4115 case IEEE80211_OFDM_RATE_48MB_MASK:
4116 return 48000000;
4117 case IEEE80211_OFDM_RATE_54MB_MASK:
4118 return 54000000;
4121 if (priv->ieee->mode == IEEE_B)
4122 return 11000000;
4123 else
4124 return 54000000;
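/*
 * Illustrative sketch (not part of the driver): ipw_get_max_rate() above
 * walks a single probe bit from the top of the 32-bit rate mask downwards
 * and stops at the first bit also present in rates_mask, which is then
 * mapped to a rate in bits/s.  The same scan, standalone; the bit positions
 * used here are made up and do not claim to match the real
 * IEEE80211_*_RATE_*_MASK values:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

int main(void)
{
	unsigned int mask = (1u << 3) | (1u << 1);	/* hypothetical: two rates enabled */
	unsigned int i = 0x80000000u;

	while (i && !(mask & i))	/* same loop shape as the driver code */
		i >>= 1;
	printf("highest enabled rate bit: 0x%08x\n", i);	/* -> 0x00000008 */
	return 0;
}
#endif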
4127 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4129 u32 rate, len = sizeof(rate);
4130 int err;
4132 if (!(priv->status & STATUS_ASSOCIATED))
4133 return 0;
4135 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4136 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4137 &len);
4138 if (err) {
4139 IPW_DEBUG_INFO("failed querying ordinals.\n");
4140 return 0;
4142 } else
4143 return ipw_get_max_rate(priv);
4145 switch (rate) {
4146 case IPW_TX_RATE_1MB:
4147 return 1000000;
4148 case IPW_TX_RATE_2MB:
4149 return 2000000;
4150 case IPW_TX_RATE_5MB:
4151 return 5500000;
4152 case IPW_TX_RATE_6MB:
4153 return 6000000;
4154 case IPW_TX_RATE_9MB:
4155 return 9000000;
4156 case IPW_TX_RATE_11MB:
4157 return 11000000;
4158 case IPW_TX_RATE_12MB:
4159 return 12000000;
4160 case IPW_TX_RATE_18MB:
4161 return 18000000;
4162 case IPW_TX_RATE_24MB:
4163 return 24000000;
4164 case IPW_TX_RATE_36MB:
4165 return 36000000;
4166 case IPW_TX_RATE_48MB:
4167 return 48000000;
4168 case IPW_TX_RATE_54MB:
4169 return 54000000;
4172 return 0;
4175 #define IPW_STATS_INTERVAL (2 * HZ)
4176 static void ipw_gather_stats(struct ipw_priv *priv)
4178 u32 rx_err, rx_err_delta, rx_packets_delta;
4179 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4180 u32 missed_beacons_percent, missed_beacons_delta;
4181 u32 quality = 0;
4182 u32 len = sizeof(u32);
4183 s16 rssi;
4184 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4185 rate_quality;
4186 u32 max_rate;
4188 if (!(priv->status & STATUS_ASSOCIATED)) {
4189 priv->quality = 0;
4190 return;
4193 /* Update the statistics */
4194 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4195 &priv->missed_beacons, &len);
4196 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4197 priv->last_missed_beacons = priv->missed_beacons;
4198 if (priv->assoc_request.beacon_interval) {
4199 missed_beacons_percent = missed_beacons_delta *
4200 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4201 (IPW_STATS_INTERVAL * 10);
4202 } else {
4203 missed_beacons_percent = 0;
4205 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4207 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4208 rx_err_delta = rx_err - priv->last_rx_err;
4209 priv->last_rx_err = rx_err;
4211 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4212 tx_failures_delta = tx_failures - priv->last_tx_failures;
4213 priv->last_tx_failures = tx_failures;
4215 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4216 priv->last_rx_packets = priv->rx_packets;
4218 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4219 priv->last_tx_packets = priv->tx_packets;
4221 /* Calculate quality based on the following:
4223 * Missed beacon: 100% = 0, 0% = 70% missed
4224 * Rate: 60% = 1Mbs, 100% = Max
4225 * Rx and Tx errors represent a straight % of total Rx/Tx
4226 * RSSI: 100% = > -50, 0% = < -80
4227 * Rx errors: 100% = 0, 0% = 50% missed
4229 * The lowest computed quality is used.
4232 #define BEACON_THRESHOLD 5
4233 beacon_quality = 100 - missed_beacons_percent;
4234 if (beacon_quality < BEACON_THRESHOLD)
4235 beacon_quality = 0;
4236 else
4237 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4238 (100 - BEACON_THRESHOLD);
4239 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4240 beacon_quality, missed_beacons_percent);
4242 priv->last_rate = ipw_get_current_rate(priv);
4243 max_rate = ipw_get_max_rate(priv);
4244 rate_quality = priv->last_rate * 40 / max_rate + 60;
4245 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4246 rate_quality, priv->last_rate / 1000000);
4248 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4249 rx_quality = 100 - (rx_err_delta * 100) /
4250 (rx_packets_delta + rx_err_delta);
4251 else
4252 rx_quality = 100;
4253 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4254 rx_quality, rx_err_delta, rx_packets_delta);
4256 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4257 tx_quality = 100 - (tx_failures_delta * 100) /
4258 (tx_packets_delta + tx_failures_delta);
4259 else
4260 tx_quality = 100;
4261 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4262 tx_quality, tx_failures_delta, tx_packets_delta);
4264 rssi = priv->exp_avg_rssi;
4265 signal_quality =
4266 (100 *
4267 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4268 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4269 (priv->ieee->perfect_rssi - rssi) *
4270 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4271 62 * (priv->ieee->perfect_rssi - rssi))) /
4272 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4273 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4274 if (signal_quality > 100)
4275 signal_quality = 100;
4276 else if (signal_quality < 1)
4277 signal_quality = 0;
4279 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4280 signal_quality, rssi);
4282 quality = min(beacon_quality,
4283 min(rate_quality,
4284 min(tx_quality, min(rx_quality, signal_quality))));
4285 if (quality == beacon_quality)
4286 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4287 quality);
4288 if (quality == rate_quality)
4289 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4290 quality);
4291 if (quality == tx_quality)
4292 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4293 quality);
4294 if (quality == rx_quality)
4295 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4296 quality);
4297 if (quality == signal_quality)
4298 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4299 quality);
4301 priv->quality = quality;
4303 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4304 IPW_STATS_INTERVAL);
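/*
 * Illustrative sketch (not part of the driver): the overall quality computed
 * above is simply the minimum of the beacon, rate, Tx, Rx and signal
 * sub-metrics, and the signal metric maps the averaged RSSI onto 0..100 with
 * a quadratic curve between worst_rssi and perfect_rssi.  A standalone
 * evaluation of that same expression; the -85/-20 dBm endpoints below are
 * example values, not necessarily the driver's defaults:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

int main(void)
{
	int perfect = -20, worst = -85, rssi = -60;
	int span = perfect - worst;		/* 65 dB between endpoints */
	long q;

	q = (100L * span * span -
	     (long)(perfect - rssi) *
	     (15L * span + 62L * (perfect - rssi))) /
	    ((long)span * span);
	if (q > 100)
		q = 100;
	else if (q < 1)
		q = 0;
	printf("signal quality at %d dBm: %ld%%\n", rssi, q);
	return 0;
}
#endif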
4307 static void ipw_bg_gather_stats(struct work_struct *work)
4309 struct ipw_priv *priv =
4310 container_of(work, struct ipw_priv, gather_stats.work);
4311 mutex_lock(&priv->mutex);
4312 ipw_gather_stats(priv);
4313 mutex_unlock(&priv->mutex);
4316 /* Missed beacon behavior:
4317 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4318 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4319 * Above disassociate threshold, give up and stop scanning.
4320 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4321 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4322 int missed_count)
4324 priv->notif_missed_beacons = missed_count;
4326 if (missed_count > priv->disassociate_threshold &&
4327 priv->status & STATUS_ASSOCIATED) {
4328 /* If associated and we've hit the missed
4329 * beacon threshold, disassociate, turn
4330 * off roaming, and abort any active scans */
4331 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4332 IPW_DL_STATE | IPW_DL_ASSOC,
4333 "Missed beacon: %d - disassociate\n", missed_count);
4334 priv->status &= ~STATUS_ROAMING;
4335 if (priv->status & STATUS_SCANNING) {
4336 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4337 IPW_DL_STATE,
4338 "Aborting scan with missed beacon.\n");
4339 queue_work(priv->workqueue, &priv->abort_scan);
4342 queue_work(priv->workqueue, &priv->disassociate);
4343 return;
4346 if (priv->status & STATUS_ROAMING) {
4347 /* If we are currently roaming, then just
4348 * print a debug statement... */
4349 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4350 "Missed beacon: %d - roam in progress\n",
4351 missed_count);
4352 return;
4355 if (roaming &&
4356 (missed_count > priv->roaming_threshold &&
4357 missed_count <= priv->disassociate_threshold)) {
4358 /* If we are not already roaming, set the ROAM
4359 * bit in the status and kick off a scan.
4360 * This can happen several times before we reach
4361 * disassociate_threshold. */
4362 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4363 "Missed beacon: %d - initiate "
4364 "roaming\n", missed_count);
4365 if (!(priv->status & STATUS_ROAMING)) {
4366 priv->status |= STATUS_ROAMING;
4367 if (!(priv->status & STATUS_SCANNING))
4368 queue_delayed_work(priv->workqueue,
4369 &priv->request_scan, 0);
4371 return;
4374 if (priv->status & STATUS_SCANNING &&
4375 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4376 /* Stop scan to keep fw from getting
4377 * stuck (only if we aren't roaming --
4378 * otherwise we'll never scan more than 2 or 3
4379 * channels..) */
4380 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4381 "Aborting scan with missed beacon.\n");
4382 queue_work(priv->workqueue, &priv->abort_scan);
4385 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
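/*
 * Illustrative sketch (not part of the driver): the handler above effectively
 * buckets the missed-beacon count into three bands around the roaming and
 * disassociate thresholds.  A standalone restatement of just that decision;
 * the threshold values below are hypothetical:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

static const char *beacon_action(int missed, int roam_thr, int disassoc_thr)
{
	if (missed > disassoc_thr)
		return "disassociate";
	if (missed > roam_thr)	/* roaming only works if disassoc_thr > roam_thr */
		return "roam";
	return "wait";
}

int main(void)
{
	int roam_thr = 8, disassoc_thr = 24;	/* hypothetical thresholds */
	int counts[] = { 3, 12, 30 };
	int i;

	for (i = 0; i < 3; i++)
		printf("missed=%2d -> %s\n", counts[i],
		       beacon_action(counts[i], roam_thr, disassoc_thr));
	return 0;
}
#endif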
4388 static void ipw_scan_event(struct work_struct *work)
4390 union iwreq_data wrqu;
4392 struct ipw_priv *priv =
4393 container_of(work, struct ipw_priv, scan_event.work);
4395 wrqu.data.length = 0;
4396 wrqu.data.flags = 0;
4397 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4400 static void handle_scan_event(struct ipw_priv *priv)
4402 /* Only userspace-requested scan completion events go out immediately */
4403 if (!priv->user_requested_scan) {
4404 if (!delayed_work_pending(&priv->scan_event))
4405 queue_delayed_work(priv->workqueue, &priv->scan_event,
4406 round_jiffies_relative(msecs_to_jiffies(4000)));
4407 } else {
4408 union iwreq_data wrqu;
4410 priv->user_requested_scan = 0;
4411 cancel_delayed_work(&priv->scan_event);
4413 wrqu.data.length = 0;
4414 wrqu.data.flags = 0;
4415 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
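/*
 * Illustrative sketch (not part of the driver): handle_scan_event() above
 * sends user-requested scan completions immediately and coalesces all other
 * completions behind a single deferred event roughly four seconds later.
 * The decision, restated standalone (the helper below is a stand-in, not a
 * kernel API):
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

static void scan_done(int user_requested, int *deferred_pending)
{
	if (user_requested) {
		*deferred_pending = 0;	/* cancel any queued deferred event */
		printf("send SIOCGIWSCAN event now\n");
	} else if (!*deferred_pending) {
		*deferred_pending = 1;	/* queue one event ~4000 ms from now */
		printf("defer SIOCGIWSCAN event\n");
	}
}

int main(void)
{
	int pending = 0;

	scan_done(0, &pending);	/* background scan: deferred */
	scan_done(0, &pending);	/* another background scan: coalesced */
	scan_done(1, &pending);	/* user-requested scan: sent immediately */
	return 0;
}
#endif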
4420 * Handle host notification packet.
4421 * Called from interrupt routine
4423 static void ipw_rx_notification(struct ipw_priv *priv,
4424 struct ipw_rx_notification *notif)
4426 DECLARE_SSID_BUF(ssid);
4427 u16 size = le16_to_cpu(notif->size);
4430 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4432 switch (notif->subtype) {
4433 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4434 struct notif_association *assoc = &notif->u.assoc;
4436 switch (assoc->state) {
4437 case CMAS_ASSOCIATED:{
4438 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4439 IPW_DL_ASSOC,
4440 "associated: '%s' %pM \n",
4441 print_ssid(ssid, priv->essid,
4442 priv->essid_len),
4443 priv->bssid);
4445 switch (priv->ieee->iw_mode) {
4446 case IW_MODE_INFRA:
4447 memcpy(priv->ieee->bssid,
4448 priv->bssid, ETH_ALEN);
4449 break;
4451 case IW_MODE_ADHOC:
4452 memcpy(priv->ieee->bssid,
4453 priv->bssid, ETH_ALEN);
4455 /* clear out the station table */
4456 priv->num_stations = 0;
4458 IPW_DEBUG_ASSOC
4459 ("queueing adhoc check\n");
4460 queue_delayed_work(priv->
4461 workqueue,
4462 &priv->
4463 adhoc_check,
4464 le16_to_cpu(priv->
4465 assoc_request.
4466 beacon_interval));
4467 break;
4470 priv->status &= ~STATUS_ASSOCIATING;
4471 priv->status |= STATUS_ASSOCIATED;
4472 queue_work(priv->workqueue,
4473 &priv->system_config);
4475 #ifdef CONFIG_IPW2200_QOS
4476 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4477 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4478 if ((priv->status & STATUS_AUTH) &&
4479 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4480 == IEEE80211_STYPE_ASSOC_RESP)) {
4481 if ((sizeof
4482 (struct
4483 ieee80211_assoc_response)
4484 <= size)
4485 && (size <= 2314)) {
4486 struct
4487 ieee80211_rx_stats
4488 stats = {
4489 .len = size - 1,
4492 IPW_DEBUG_QOS
4493 ("QoS Associate "
4494 "size %d\n", size);
4495 ieee80211_rx_mgt(priv->
4496 ieee,
4497 (struct
4498 ieee80211_hdr_4addr
4500 &notif->u.raw, &stats);
4503 #endif
4505 schedule_work(&priv->link_up);
4507 break;
4510 case CMAS_AUTHENTICATED:{
4511 if (priv->
4512 status & (STATUS_ASSOCIATED |
4513 STATUS_AUTH)) {
4514 struct notif_authenticate *auth
4515 = &notif->u.auth;
4516 IPW_DEBUG(IPW_DL_NOTIF |
4517 IPW_DL_STATE |
4518 IPW_DL_ASSOC,
4519 "deauthenticated: '%s' "
4520 "%pM"
4521 ": (0x%04X) - %s \n",
4522 print_ssid(ssid,
4523 priv->
4524 essid,
4525 priv->
4526 essid_len),
4527 priv->bssid,
4528 le16_to_cpu(auth->status),
4529 ipw_get_status_code
4530 (le16_to_cpu
4531 (auth->status)));
4533 priv->status &=
4534 ~(STATUS_ASSOCIATING |
4535 STATUS_AUTH |
4536 STATUS_ASSOCIATED);
4538 schedule_work(&priv->link_down);
4539 break;
4542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4543 IPW_DL_ASSOC,
4544 "authenticated: '%s' %pM\n",
4545 print_ssid(ssid, priv->essid,
4546 priv->essid_len),
4547 priv->bssid);
4548 break;
4551 case CMAS_INIT:{
4552 if (priv->status & STATUS_AUTH) {
4553 struct
4554 ieee80211_assoc_response
4555 *resp;
4556 resp =
4557 (struct
4558 ieee80211_assoc_response
4559 *)&notif->u.raw;
4560 IPW_DEBUG(IPW_DL_NOTIF |
4561 IPW_DL_STATE |
4562 IPW_DL_ASSOC,
4563 "association failed (0x%04X): %s\n",
4564 le16_to_cpu(resp->status),
4565 ipw_get_status_code
4566 (le16_to_cpu
4567 (resp->status)));
4570 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4571 IPW_DL_ASSOC,
4572 "disassociated: '%s' %pM \n",
4573 print_ssid(ssid, priv->essid,
4574 priv->essid_len),
4575 priv->bssid);
4577 priv->status &=
4578 ~(STATUS_DISASSOCIATING |
4579 STATUS_ASSOCIATING |
4580 STATUS_ASSOCIATED | STATUS_AUTH);
4581 if (priv->assoc_network
4582 && (priv->assoc_network->
4583 capability &
4584 WLAN_CAPABILITY_IBSS))
4585 ipw_remove_current_network
4586 (priv);
4588 schedule_work(&priv->link_down);
4590 break;
4593 case CMAS_RX_ASSOC_RESP:
4594 break;
4596 default:
4597 IPW_ERROR("assoc: unknown (%d)\n",
4598 assoc->state);
4599 break;
4602 break;
4605 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4606 struct notif_authenticate *auth = &notif->u.auth;
4607 switch (auth->state) {
4608 case CMAS_AUTHENTICATED:
4609 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4610 "authenticated: '%s' %pM \n",
4611 print_ssid(ssid, priv->essid,
4612 priv->essid_len),
4613 priv->bssid);
4614 priv->status |= STATUS_AUTH;
4615 break;
4617 case CMAS_INIT:
4618 if (priv->status & STATUS_AUTH) {
4619 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4620 IPW_DL_ASSOC,
4621 "authentication failed (0x%04X): %s\n",
4622 le16_to_cpu(auth->status),
4623 ipw_get_status_code(le16_to_cpu
4624 (auth->
4625 status)));
4627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4628 IPW_DL_ASSOC,
4629 "deauthenticated: '%s' %pM\n",
4630 print_ssid(ssid, priv->essid,
4631 priv->essid_len),
4632 priv->bssid);
4634 priv->status &= ~(STATUS_ASSOCIATING |
4635 STATUS_AUTH |
4636 STATUS_ASSOCIATED);
4638 schedule_work(&priv->link_down);
4639 break;
4641 case CMAS_TX_AUTH_SEQ_1:
4642 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4643 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4644 break;
4645 case CMAS_RX_AUTH_SEQ_2:
4646 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4647 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4648 break;
4649 case CMAS_AUTH_SEQ_1_PASS:
4650 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4651 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4652 break;
4653 case CMAS_AUTH_SEQ_1_FAIL:
4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4655 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4656 break;
4657 case CMAS_TX_AUTH_SEQ_3:
4658 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4659 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4660 break;
4661 case CMAS_RX_AUTH_SEQ_4:
4662 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4663 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4664 break;
4665 case CMAS_AUTH_SEQ_2_PASS:
4666 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4667 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4668 break;
4669 case CMAS_AUTH_SEQ_2_FAIL:
4670 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4671 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4672 break;
4673 case CMAS_TX_ASSOC:
4674 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4675 IPW_DL_ASSOC, "TX_ASSOC\n");
4676 break;
4677 case CMAS_RX_ASSOC_RESP:
4678 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4679 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4681 break;
4682 case CMAS_ASSOCIATED:
4683 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 IPW_DL_ASSOC, "ASSOCIATED\n");
4685 break;
4686 default:
4687 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4688 auth->state);
4689 break;
4691 break;
4694 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4695 struct notif_channel_result *x =
4696 &notif->u.channel_result;
4698 if (size == sizeof(*x)) {
4699 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4700 x->channel_num);
4701 } else {
4702 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4703 "(should be %zd)\n",
4704 size, sizeof(*x));
4706 break;
4709 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4710 struct notif_scan_complete *x = &notif->u.scan_complete;
4711 if (size == sizeof(*x)) {
4712 IPW_DEBUG_SCAN
4713 ("Scan completed: type %d, %d channels, "
4714 "%d status\n", x->scan_type,
4715 x->num_channels, x->status);
4716 } else {
4717 IPW_ERROR("Scan completed of wrong size %d "
4718 "(should be %zd)\n",
4719 size, sizeof(*x));
4722 priv->status &=
4723 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4725 wake_up_interruptible(&priv->wait_state);
4726 cancel_delayed_work(&priv->scan_check);
4728 if (priv->status & STATUS_EXIT_PENDING)
4729 break;
4731 priv->ieee->scans++;
4733 #ifdef CONFIG_IPW2200_MONITOR
4734 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4735 priv->status |= STATUS_SCAN_FORCED;
4736 queue_delayed_work(priv->workqueue,
4737 &priv->request_scan, 0);
4738 break;
4740 priv->status &= ~STATUS_SCAN_FORCED;
4741 #endif /* CONFIG_IPW2200_MONITOR */
4743 /* Do queued direct scans first */
4744 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4745 queue_delayed_work(priv->workqueue,
4746 &priv->request_direct_scan, 0);
4749 if (!(priv->status & (STATUS_ASSOCIATED |
4750 STATUS_ASSOCIATING |
4751 STATUS_ROAMING |
4752 STATUS_DISASSOCIATING)))
4753 queue_work(priv->workqueue, &priv->associate);
4754 else if (priv->status & STATUS_ROAMING) {
4755 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4756 /* If a scan completed and we are in roam mode, then
4757 * the scan that completed was the one requested as a
4758 * result of entering roam... so, schedule the
4759 * roam work */
4760 queue_work(priv->workqueue,
4761 &priv->roam);
4762 else
4763 /* Don't schedule if we aborted the scan */
4764 priv->status &= ~STATUS_ROAMING;
4765 } else if (priv->status & STATUS_SCAN_PENDING)
4766 queue_delayed_work(priv->workqueue,
4767 &priv->request_scan, 0);
4768 else if (priv->config & CFG_BACKGROUND_SCAN
4769 && priv->status & STATUS_ASSOCIATED)
4770 queue_delayed_work(priv->workqueue,
4771 &priv->request_scan,
4772 round_jiffies_relative(HZ));
4774 /* Send an empty event to user space.
4775 * We don't send the received data on the event because
4776 * it would require us to do complex transcoding, and
4777 * we want to minimise the work done in the irq handler
4778 * Use a request to extract the data.
4779 * Also, we generate this event for any scan, regardless
4780 * of how the scan was initiated. User space can just
4781 * sync on periodic scan to get fresh data...
4782 * Jean II */
4783 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4784 handle_scan_event(priv);
4785 break;
4788 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4789 struct notif_frag_length *x = &notif->u.frag_len;
4791 if (size == sizeof(*x))
4792 IPW_ERROR("Frag length: %d\n",
4793 le16_to_cpu(x->frag_length));
4794 else
4795 IPW_ERROR("Frag length of wrong size %d "
4796 "(should be %zd)\n",
4797 size, sizeof(*x));
4798 break;
4801 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4802 struct notif_link_deterioration *x =
4803 &notif->u.link_deterioration;
4805 if (size == sizeof(*x)) {
4806 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4807 "link deterioration: type %d, cnt %d\n",
4808 x->silence_notification_type,
4809 x->silence_count);
4810 memcpy(&priv->last_link_deterioration, x,
4811 sizeof(*x));
4812 } else {
4813 IPW_ERROR("Link Deterioration of wrong size %d "
4814 "(should be %zd)\n",
4815 size, sizeof(*x));
4817 break;
4820 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4821 IPW_ERROR("Dino config\n");
4822 if (priv->hcmd
4823 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4824 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4826 break;
4829 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4830 struct notif_beacon_state *x = &notif->u.beacon_state;
4831 if (size != sizeof(*x)) {
4832 IPW_ERROR
4833 ("Beacon state of wrong size %d (should "
4834 "be %zd)\n", size, sizeof(*x));
4835 break;
4838 if (le32_to_cpu(x->state) ==
4839 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4840 ipw_handle_missed_beacon(priv,
4841 le32_to_cpu(x->
4842 number));
4844 break;
4847 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4848 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4849 if (size == sizeof(*x)) {
4850 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4851 "0x%02x station %d\n",
4852 x->key_state, x->security_type,
4853 x->station_index);
4854 break;
4857 IPW_ERROR
4858 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4859 size, sizeof(*x));
4860 break;
4863 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4864 struct notif_calibration *x = &notif->u.calibration;
4866 if (size == sizeof(*x)) {
4867 memcpy(&priv->calib, x, sizeof(*x));
4868 IPW_DEBUG_INFO("TODO: Calibration\n");
4869 break;
4872 IPW_ERROR
4873 ("Calibration of wrong size %d (should be %zd)\n",
4874 size, sizeof(*x));
4875 break;
4878 case HOST_NOTIFICATION_NOISE_STATS:{
4879 if (size == sizeof(u32)) {
4880 priv->exp_avg_noise =
4881 exponential_average(priv->exp_avg_noise,
4882 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4883 DEPTH_NOISE);
4884 break;
4887 IPW_ERROR
4888 ("Noise stat is wrong size %d (should be %zd)\n",
4889 size, sizeof(u32));
4890 break;
4893 default:
4894 IPW_DEBUG_NOTIF("Unknown notification: "
4895 "subtype=%d,flags=0x%2x,size=%d\n",
4896 notif->subtype, notif->flags, size);
4901 * Destroys all DMA structures and initialises them again
4903 * @param priv
4904 * @return error code
4906 static int ipw_queue_reset(struct ipw_priv *priv)
4908 int rc = 0;
4909 /** @todo customize queue sizes */
4910 int nTx = 64, nTxCmd = 8;
4911 ipw_tx_queue_free(priv);
4912 /* Tx CMD queue */
4913 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4914 IPW_TX_CMD_QUEUE_READ_INDEX,
4915 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4916 IPW_TX_CMD_QUEUE_BD_BASE,
4917 IPW_TX_CMD_QUEUE_BD_SIZE);
4918 if (rc) {
4919 IPW_ERROR("Tx Cmd queue init failed\n");
4920 goto error;
4922 /* Tx queue(s) */
4923 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4924 IPW_TX_QUEUE_0_READ_INDEX,
4925 IPW_TX_QUEUE_0_WRITE_INDEX,
4926 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4927 if (rc) {
4928 IPW_ERROR("Tx 0 queue init failed\n");
4929 goto error;
4931 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4932 IPW_TX_QUEUE_1_READ_INDEX,
4933 IPW_TX_QUEUE_1_WRITE_INDEX,
4934 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4935 if (rc) {
4936 IPW_ERROR("Tx 1 queue init failed\n");
4937 goto error;
4939 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4940 IPW_TX_QUEUE_2_READ_INDEX,
4941 IPW_TX_QUEUE_2_WRITE_INDEX,
4942 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4943 if (rc) {
4944 IPW_ERROR("Tx 2 queue init failed\n");
4945 goto error;
4947 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4948 IPW_TX_QUEUE_3_READ_INDEX,
4949 IPW_TX_QUEUE_3_WRITE_INDEX,
4950 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4951 if (rc) {
4952 IPW_ERROR("Tx 3 queue init failed\n");
4953 goto error;
4955 /* statistics */
4956 priv->rx_bufs_min = 0;
4957 priv->rx_pend_max = 0;
4958 return rc;
4960 error:
4961 ipw_tx_queue_free(priv);
4962 return rc;
4966 * Reclaim Tx queue entries no longer used by the NIC.
4968 * When FW advances 'R' index, all entries between old and
4969 * new 'R' index need to be reclaimed. As a result, some free space
4970 * forms. If there is enough free space (> low mark), wake Tx queue.
4972 * @note Need to protect against garbage in 'R' index
4973 * @param priv
4974 * @param txq
4975 * @param qindex
4976 * @return Number of used entries remaining in the queue
4978 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4979 struct clx2_tx_queue *txq, int qindex)
4981 u32 hw_tail;
4982 int used;
4983 struct clx2_queue *q = &txq->q;
4985 hw_tail = ipw_read32(priv, q->reg_r);
4986 if (hw_tail >= q->n_bd) {
4987 IPW_ERROR
4988 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4989 hw_tail, q->n_bd);
4990 goto done;
4992 for (; q->last_used != hw_tail;
4993 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4994 ipw_queue_tx_free_tfd(priv, txq);
4995 priv->tx_packets++;
4997 done:
4998 if ((ipw_tx_queue_space(q) > q->low_mark) &&
4999 (qindex >= 0))
5000 netif_wake_queue(priv->net_dev);
5001 used = q->first_empty - q->last_used;
5002 if (used < 0)
5003 used += q->n_bd;
5005 return used;
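/*
 * Illustrative sketch (not part of the driver): the "used" count returned
 * above is circular -- once first_empty has wrapped around the buffer
 * descriptor ring past last_used, the raw difference goes negative and is
 * corrected by adding n_bd.  The same arithmetic, standalone:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

static int ring_used(int first_empty, int last_used, int n_bd)
{
	int used = first_empty - last_used;

	if (used < 0)
		used += n_bd;	/* the producer index has wrapped */
	return used;
}

int main(void)
{
	printf("%d\n", ring_used(10, 4, 64));	/* 6 entries in flight */
	printf("%d\n", ring_used(2, 60, 64));	/* wrapped: still 6 in flight */
	return 0;
}
#endif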
5008 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5009 int len, int sync)
5011 struct clx2_tx_queue *txq = &priv->txq_cmd;
5012 struct clx2_queue *q = &txq->q;
5013 struct tfd_frame *tfd;
5015 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5016 IPW_ERROR("No space for Tx\n");
5017 return -EBUSY;
5020 tfd = &txq->bd[q->first_empty];
5021 txq->txb[q->first_empty] = NULL;
5023 memset(tfd, 0, sizeof(*tfd));
5024 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5025 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5026 priv->hcmd_seq++;
5027 tfd->u.cmd.index = hcmd;
5028 tfd->u.cmd.length = len;
5029 memcpy(tfd->u.cmd.payload, buf, len);
5030 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5031 ipw_write32(priv, q->reg_w, q->first_empty);
5032 _ipw_read32(priv, 0x90);
5034 return 0;
5038 * Rx theory of operation
5040 * The host allocates 32 DMA target addresses and passes the host address
5041 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5042 * 0 to 31
5044 * Rx Queue Indexes
5045 * The host/firmware share two index registers for managing the Rx buffers.
5047 * The READ index maps to the first position that the firmware may be writing
5048 * to -- the driver can read up to (but not including) this position and get
5049 * good data.
5050 * The READ index is managed by the firmware once the card is enabled.
5052 * The WRITE index maps to the last position the driver has read from -- the
5053 * position preceding WRITE is the last slot the firmware can place a packet.
5055 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5056 * WRITE = READ.
5058 * During initialization the host sets up the READ queue position to the first
5059 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5061 * When the firmware places a packet in a buffer it will advance the READ index
5062 * and fire the RX interrupt. The driver can then query the READ index and
5063 * process as many packets as possible, moving the WRITE index forward as it
5064 * resets the Rx queue buffers with new memory.
5066 * The management in the driver is as follows:
5067 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5068 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5069 * to replenish the ipw->rxq->rx_free.
5070 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5071 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5072 * 'processed' and 'read' driver indexes as well)
5073 * + A received packet is processed and handed to the kernel network stack,
5074 * detached from the ipw->rxq. The driver 'processed' index is updated.
5075 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5076 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5077 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5078 * were enough free buffers and RX_STALLED is set it is cleared.
5081 * Driver sequence:
5083 * ipw_rx_queue_alloc() Allocates rx_free
5084 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5085 * ipw_rx_queue_restock
5086 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5087 * queue, updates firmware pointers, and updates
5088 * the WRITE index. If insufficient rx_free buffers
5089 * are available, schedules ipw_rx_queue_replenish
5091 * -- enable interrupts --
5092 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5093 * READ INDEX, detaching the SKB from the pool.
5094 * Moves the packet buffer from queue to rx_used.
5095 * Calls ipw_rx_queue_restock to refill any empty
5096 * slots.
5097 * ...
5102 * If there are slots in the RX queue that need to be restocked,
5103 * and we have free pre-allocated buffers, fill the ranks as much
5104 * as we can pulling from rx_free.
5106 * This moves the 'write' index forward to catch up with 'processed', and
5107 * also updates the memory address in the firmware to reference the new
5108 * target buffer.
5110 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5112 struct ipw_rx_queue *rxq = priv->rxq;
5113 struct list_head *element;
5114 struct ipw_rx_mem_buffer *rxb;
5115 unsigned long flags;
5116 int write;
5118 spin_lock_irqsave(&rxq->lock, flags);
5119 write = rxq->write;
5120 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5121 element = rxq->rx_free.next;
5122 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5123 list_del(element);
5125 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5126 rxb->dma_addr);
5127 rxq->queue[rxq->write] = rxb;
5128 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5129 rxq->free_count--;
5131 spin_unlock_irqrestore(&rxq->lock, flags);
5133 /* If the pre-allocated buffer pool is dropping low, schedule to
5134 * refill it */
5135 if (rxq->free_count <= RX_LOW_WATERMARK)
5136 queue_work(priv->workqueue, &priv->rx_replenish);
5138 /* If we've added more space for the firmware to place data, tell it */
5139 if (write != rxq->write)
5140 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
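/*
 * Illustrative sketch (not part of the driver): ipw_rx_queue_restock() above
 * advances the WRITE index modulo RX_QUEUE_SIZE for every fresh buffer handed
 * to the firmware and only writes the IPW_RX_WRITE_INDEX "doorbell" when the
 * index actually moved.  A standalone restatement of that pattern; the ring
 * size below is arbitrary:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

#define RING_SIZE 32

int main(void)
{
	int start = 30, write = 30, restocked = 5, i;

	for (i = 0; i < restocked; i++)
		write = (write + 1) % RING_SIZE;	/* same wrap as rxq->write */

	if (write != start)	/* only notify the firmware if something changed */
		printf("tell firmware: new write index = %d\n", write);	/* -> 3 */
	return 0;
}
#endif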
5144 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5145 * Also restock the Rx queue via ipw_rx_queue_restock.
5147 * This is called as a scheduled work item (except during initialization)
5149 static void ipw_rx_queue_replenish(void *data)
5151 struct ipw_priv *priv = data;
5152 struct ipw_rx_queue *rxq = priv->rxq;
5153 struct list_head *element;
5154 struct ipw_rx_mem_buffer *rxb;
5155 unsigned long flags;
5157 spin_lock_irqsave(&rxq->lock, flags);
5158 while (!list_empty(&rxq->rx_used)) {
5159 element = rxq->rx_used.next;
5160 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5161 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5162 if (!rxb->skb) {
5163 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5164 priv->net_dev->name);
5165 /* We don't reschedule replenish work here -- we will
5166 * call the restock method and if it still needs
5167 * more buffers it will schedule replenish */
5168 break;
5170 list_del(element);
5172 rxb->dma_addr =
5173 pci_map_single(priv->pci_dev, rxb->skb->data,
5174 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5176 list_add_tail(&rxb->list, &rxq->rx_free);
5177 rxq->free_count++;
5179 spin_unlock_irqrestore(&rxq->lock, flags);
5181 ipw_rx_queue_restock(priv);
5184 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5186 struct ipw_priv *priv =
5187 container_of(work, struct ipw_priv, rx_replenish);
5188 mutex_lock(&priv->mutex);
5189 ipw_rx_queue_replenish(priv);
5190 mutex_unlock(&priv->mutex);
5193 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5194 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5195 * This free routine walks the list of POOL entries and if SKB is set to
5196 * non-NULL it is unmapped and freed.
5198 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5200 int i;
5202 if (!rxq)
5203 return;
5205 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5206 if (rxq->pool[i].skb != NULL) {
5207 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5208 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5209 dev_kfree_skb(rxq->pool[i].skb);
5213 kfree(rxq);
5216 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5218 struct ipw_rx_queue *rxq;
5219 int i;
5221 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5222 if (unlikely(!rxq)) {
5223 IPW_ERROR("memory allocation failed\n");
5224 return NULL;
5226 spin_lock_init(&rxq->lock);
5227 INIT_LIST_HEAD(&rxq->rx_free);
5228 INIT_LIST_HEAD(&rxq->rx_used);
5230 /* Fill the rx_used queue with _all_ of the Rx buffers */
5231 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5232 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5234 /* Set us so that we have processed and used all buffers, but have
5235 * not restocked the Rx queue with fresh buffers */
5236 rxq->read = rxq->write = 0;
5237 rxq->free_count = 0;
5239 return rxq;
5242 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5244 rate &= ~IEEE80211_BASIC_RATE_MASK;
5245 if (ieee_mode == IEEE_A) {
5246 switch (rate) {
5247 case IEEE80211_OFDM_RATE_6MB:
5248 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5249 1 : 0;
5250 case IEEE80211_OFDM_RATE_9MB:
5251 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5252 1 : 0;
5253 case IEEE80211_OFDM_RATE_12MB:
5254 return priv->
5255 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5256 case IEEE80211_OFDM_RATE_18MB:
5257 return priv->
5258 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5259 case IEEE80211_OFDM_RATE_24MB:
5260 return priv->
5261 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5262 case IEEE80211_OFDM_RATE_36MB:
5263 return priv->
5264 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5265 case IEEE80211_OFDM_RATE_48MB:
5266 return priv->
5267 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5268 case IEEE80211_OFDM_RATE_54MB:
5269 return priv->
5270 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5271 default:
5272 return 0;
5276 /* B and G mixed */
5277 switch (rate) {
5278 case IEEE80211_CCK_RATE_1MB:
5279 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5280 case IEEE80211_CCK_RATE_2MB:
5281 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5282 case IEEE80211_CCK_RATE_5MB:
5283 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5284 case IEEE80211_CCK_RATE_11MB:
5285 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5288 /* If we are limited to B modulations, bail at this point */
5289 if (ieee_mode == IEEE_B)
5290 return 0;
5292 /* G */
5293 switch (rate) {
5294 case IEEE80211_OFDM_RATE_6MB:
5295 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5296 case IEEE80211_OFDM_RATE_9MB:
5297 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5298 case IEEE80211_OFDM_RATE_12MB:
5299 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5300 case IEEE80211_OFDM_RATE_18MB:
5301 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5302 case IEEE80211_OFDM_RATE_24MB:
5303 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5304 case IEEE80211_OFDM_RATE_36MB:
5305 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5306 case IEEE80211_OFDM_RATE_48MB:
5307 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5308 case IEEE80211_OFDM_RATE_54MB:
5309 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5312 return 0;
5315 static int ipw_compatible_rates(struct ipw_priv *priv,
5316 const struct ieee80211_network *network,
5317 struct ipw_supported_rates *rates)
5319 int num_rates, i;
5321 memset(rates, 0, sizeof(*rates));
5322 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5323 rates->num_rates = 0;
5324 for (i = 0; i < num_rates; i++) {
5325 if (!ipw_is_rate_in_mask(priv, network->mode,
5326 network->rates[i])) {
5328 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5329 IPW_DEBUG_SCAN("Adding masked mandatory "
5330 "rate %02X\n",
5331 network->rates[i]);
5332 rates->supported_rates[rates->num_rates++] =
5333 network->rates[i];
5334 continue;
5337 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5338 network->rates[i], priv->rates_mask);
5339 continue;
5342 rates->supported_rates[rates->num_rates++] = network->rates[i];
5345 num_rates = min(network->rates_ex_len,
5346 (u8) (IPW_MAX_RATES - num_rates));
5347 for (i = 0; i < num_rates; i++) {
5348 if (!ipw_is_rate_in_mask(priv, network->mode,
5349 network->rates_ex[i])) {
5350 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5351 IPW_DEBUG_SCAN("Adding masked mandatory "
5352 "rate %02X\n",
5353 network->rates_ex[i]);
5354 rates->supported_rates[rates->num_rates++] =
5355 network->rates_ex[i];
5356 continue;
5359 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5360 network->rates_ex[i], priv->rates_mask);
5361 continue;
5364 rates->supported_rates[rates->num_rates++] =
5365 network->rates_ex[i];
5368 return 1;
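/*
 * Illustrative sketch (not part of the driver): ipw_compatible_rates() above
 * intersects the rates advertised by the network with the locally configured
 * rate mask, but deliberately keeps any advertised rate whose basic-rate flag
 * is set even when the mask would exclude it, since mandatory rates have to
 * be honoured.  A standalone restatement of that filtering rule; the 0x80
 * flag value and the allowed() predicate are assumptions of this sketch:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

#define BASIC_FLAG 0x80	/* assumed marker for a mandatory (basic) rate */

static int allowed(unsigned char rate)
{
	unsigned char r = rate & 0x7f;

	/* pretend only 1, 2, 5.5 and 11 Mb/s (in 500 kb/s units) are enabled */
	return r == 2 || r == 4 || r == 11 || r == 22;
}

int main(void)
{
	unsigned char adv[] = { 0x82, 0x84, 0x8b, 0x96, 0x24, 0xec };
	int i, kept = 0;

	for (i = 0; i < 6; i++) {
		if (allowed(adv[i]) || (adv[i] & BASIC_FLAG))
			kept++;	/* kept: either enabled locally or mandatory */
	}
	printf("%d compatible rates kept\n", kept);	/* basic rates always survive */
	return 0;
}
#endif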
5371 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5372 const struct ipw_supported_rates *src)
5374 u8 i;
5375 for (i = 0; i < src->num_rates; i++)
5376 dest->supported_rates[i] = src->supported_rates[i];
5377 dest->num_rates = src->num_rates;
5380 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5381 * mask should ever be used -- right now all callers to add the scan rates are
5382 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5383 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5384 u8 modulation, u32 rate_mask)
5386 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5387 IEEE80211_BASIC_RATE_MASK : 0;
5389 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5390 rates->supported_rates[rates->num_rates++] =
5391 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5393 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5394 rates->supported_rates[rates->num_rates++] =
5395 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5397 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5398 rates->supported_rates[rates->num_rates++] = basic_mask |
5399 IEEE80211_CCK_RATE_5MB;
5401 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5402 rates->supported_rates[rates->num_rates++] = basic_mask |
5403 IEEE80211_CCK_RATE_11MB;
5406 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5407 u8 modulation, u32 rate_mask)
5409 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5410 IEEE80211_BASIC_RATE_MASK : 0;
5412 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5413 rates->supported_rates[rates->num_rates++] = basic_mask |
5414 IEEE80211_OFDM_RATE_6MB;
5416 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5417 rates->supported_rates[rates->num_rates++] =
5418 IEEE80211_OFDM_RATE_9MB;
5420 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5421 rates->supported_rates[rates->num_rates++] = basic_mask |
5422 IEEE80211_OFDM_RATE_12MB;
5424 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5425 rates->supported_rates[rates->num_rates++] =
5426 IEEE80211_OFDM_RATE_18MB;
5428 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5429 rates->supported_rates[rates->num_rates++] = basic_mask |
5430 IEEE80211_OFDM_RATE_24MB;
5432 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5433 rates->supported_rates[rates->num_rates++] =
5434 IEEE80211_OFDM_RATE_36MB;
5436 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5437 rates->supported_rates[rates->num_rates++] =
5438 IEEE80211_OFDM_RATE_48MB;
5440 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5441 rates->supported_rates[rates->num_rates++] =
5442 IEEE80211_OFDM_RATE_54MB;
5445 struct ipw_network_match {
5446 struct ieee80211_network *network;
5447 struct ipw_supported_rates rates;
5450 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5451 struct ipw_network_match *match,
5452 struct ieee80211_network *network,
5453 int roaming)
5455 struct ipw_supported_rates rates;
5456 DECLARE_SSID_BUF(ssid);
5458 /* Verify that this network's capability is compatible with the
5459 * current mode (AdHoc or Infrastructure) */
5460 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5461 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5462 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5463 "capability mismatch.\n",
5464 print_ssid(ssid, network->ssid,
5465 network->ssid_len),
5466 network->bssid);
5467 return 0;
5470 if (unlikely(roaming)) {
5471 /* If we are roaming, then check whether this is a valid
5472 * network to try to roam to */
5473 if ((network->ssid_len != match->network->ssid_len) ||
5474 memcmp(network->ssid, match->network->ssid,
5475 network->ssid_len)) {
5476 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5477 "because of non-network ESSID.\n",
5478 print_ssid(ssid, network->ssid,
5479 network->ssid_len),
5480 network->bssid);
5481 return 0;
5483 } else {
5484 /* If an ESSID has been configured then compare the broadcast
5485 * ESSID to ours */
5486 if ((priv->config & CFG_STATIC_ESSID) &&
5487 ((network->ssid_len != priv->essid_len) ||
5488 memcmp(network->ssid, priv->essid,
5489 min(network->ssid_len, priv->essid_len)))) {
5490 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5492 strncpy(escaped,
5493 print_ssid(ssid, network->ssid,
5494 network->ssid_len),
5495 sizeof(escaped));
5496 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5497 "because of ESSID mismatch: '%s'.\n",
5498 escaped, network->bssid,
5499 print_ssid(ssid, priv->essid,
5500 priv->essid_len));
5501 return 0;
5505 /* If the currently matched network is preferable to this one,
5506 * don't bother testing everything else. */
5508 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5509 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5510 "current network.\n",
5511 print_ssid(ssid, match->network->ssid,
5512 match->network->ssid_len));
5513 return 0;
5514 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5515 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5516 "current network.\n",
5517 print_ssid(ssid, match->network->ssid,
5518 match->network->ssid_len));
5519 return 0;
5522 /* Now go through and see if the requested network is valid... */
5523 if (priv->ieee->scan_age != 0 &&
5524 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5525 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5526 "because of age: %ums.\n",
5527 print_ssid(ssid, network->ssid,
5528 network->ssid_len),
5529 network->bssid,
5530 jiffies_to_msecs(jiffies -
5531 network->last_scanned));
5532 return 0;
5535 if ((priv->config & CFG_STATIC_CHANNEL) &&
5536 (network->channel != priv->channel)) {
5537 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5538 "because of channel mismatch: %d != %d.\n",
5539 print_ssid(ssid, network->ssid,
5540 network->ssid_len),
5541 network->bssid,
5542 network->channel, priv->channel);
5543 return 0;
5546 /* Verify privacy compatibility */
5547 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5548 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5549 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5550 "because of privacy mismatch: %s != %s.\n",
5551 print_ssid(ssid, network->ssid,
5552 network->ssid_len),
5553 network->bssid,
5554 priv->
5555 capability & CAP_PRIVACY_ON ? "on" : "off",
5556 network->
5557 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5558 "off");
5559 return 0;
5562 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5563 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5564 "because of the same BSSID match: %pM"
5565 ".\n", print_ssid(ssid, network->ssid,
5566 network->ssid_len),
5567 network->bssid,
5568 priv->bssid);
5569 return 0;
5572 /* Filter out any incompatible freq / mode combinations */
5573 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5574 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5575 "because of invalid frequency/mode "
5576 "combination.\n",
5577 print_ssid(ssid, network->ssid,
5578 network->ssid_len),
5579 network->bssid);
5580 return 0;
5583 /* Ensure that the rates supported by the driver are compatible with
5584 * this AP, including verification of basic rates (mandatory) */
5585 if (!ipw_compatible_rates(priv, network, &rates)) {
5586 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5587 "because configured rate mask excludes "
5588 "AP mandatory rate.\n",
5589 print_ssid(ssid, network->ssid,
5590 network->ssid_len),
5591 network->bssid);
5592 return 0;
5595 if (rates.num_rates == 0) {
5596 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5597 "because of no compatible rates.\n",
5598 print_ssid(ssid, network->ssid,
5599 network->ssid_len),
5600 network->bssid);
5601 return 0;
5604 /* TODO: Perform any further minimal comparative tests. We do not
5605 * want to put too much policy logic here; intelligent scan selection
5606 * should occur within a generic IEEE 802.11 user space tool. */
5608 /* Set up 'new' AP to this network */
5609 ipw_copy_rates(&match->rates, &rates);
5610 match->network = network;
5611 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5612 print_ssid(ssid, network->ssid, network->ssid_len),
5613 network->bssid);
5615 return 1;
5618 static void ipw_merge_adhoc_network(struct work_struct *work)
5620 DECLARE_SSID_BUF(ssid);
5621 struct ipw_priv *priv =
5622 container_of(work, struct ipw_priv, merge_networks);
5623 struct ieee80211_network *network = NULL;
5624 struct ipw_network_match match = {
5625 .network = priv->assoc_network
5628 if ((priv->status & STATUS_ASSOCIATED) &&
5629 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5630 /* First pass through ROAM process -- look for a better
5631 * network */
5632 unsigned long flags;
5634 spin_lock_irqsave(&priv->ieee->lock, flags);
5635 list_for_each_entry(network, &priv->ieee->network_list, list) {
5636 if (network != priv->assoc_network)
5637 ipw_find_adhoc_network(priv, &match, network,
5640 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5642 if (match.network == priv->assoc_network) {
5643 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5644 "merge to.\n");
5645 return;
5648 mutex_lock(&priv->mutex);
5649 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5650 IPW_DEBUG_MERGE("remove network %s\n",
5651 print_ssid(ssid, priv->essid,
5652 priv->essid_len));
5653 ipw_remove_current_network(priv);
5656 ipw_disassociate(priv);
5657 priv->assoc_network = match.network;
5658 mutex_unlock(&priv->mutex);
5659 return;
5663 static int ipw_best_network(struct ipw_priv *priv,
5664 struct ipw_network_match *match,
5665 struct ieee80211_network *network, int roaming)
5667 struct ipw_supported_rates rates;
5668 DECLARE_SSID_BUF(ssid);
5670 /* Verify that this network's capability is compatible with the
5671 * current mode (AdHoc or Infrastructure) */
5672 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5673 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5674 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5675 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5676 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5677 "capability mismatch.\n",
5678 print_ssid(ssid, network->ssid,
5679 network->ssid_len),
5680 network->bssid);
5681 return 0;
5684 if (unlikely(roaming)) {
5685 /* If we are roaming, then check whether this is a valid
5686 * network to try to roam to */
5687 if ((network->ssid_len != match->network->ssid_len) ||
5688 memcmp(network->ssid, match->network->ssid,
5689 network->ssid_len)) {
5690 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5691 "because of non-network ESSID.\n",
5692 print_ssid(ssid, network->ssid,
5693 network->ssid_len),
5694 network->bssid);
5695 return 0;
5697 } else {
5698 /* If an ESSID has been configured then compare the broadcast
5699 * ESSID to ours */
5700 if ((priv->config & CFG_STATIC_ESSID) &&
5701 ((network->ssid_len != priv->essid_len) ||
5702 memcmp(network->ssid, priv->essid,
5703 min(network->ssid_len, priv->essid_len)))) {
5704 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5705 strncpy(escaped,
5706 print_ssid(ssid, network->ssid,
5707 network->ssid_len),
5708 sizeof(escaped));
5709 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5710 "because of ESSID mismatch: '%s'.\n",
5711 escaped, network->bssid,
5712 print_ssid(ssid, priv->essid,
5713 priv->essid_len));
5714 return 0;
5718 /* If the currently matched network has a stronger signal than this
5719 * one, don't bother testing everything else. */
5720 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5721 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5722 strncpy(escaped,
5723 print_ssid(ssid, network->ssid, network->ssid_len),
5724 sizeof(escaped));
5725 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5726 "'%s (%pM)' has a stronger signal.\n",
5727 escaped, network->bssid,
5728 print_ssid(ssid, match->network->ssid,
5729 match->network->ssid_len),
5730 match->network->bssid);
5731 return 0;
5734 /* If this network has already had an association attempt within the
5735 * last 3 seconds, do not try and associate again... */
5736 if (network->last_associate &&
5737 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5738 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5739 "because of storming (%ums since last "
5740 "assoc attempt).\n",
5741 print_ssid(ssid, network->ssid,
5742 network->ssid_len),
5743 network->bssid,
5744 jiffies_to_msecs(jiffies -
5745 network->last_associate));
5746 return 0;
5749 /* Now go through and see if the requested network is valid... */
5750 if (priv->ieee->scan_age != 0 &&
5751 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5752 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5753 "because of age: %ums.\n",
5754 print_ssid(ssid, network->ssid,
5755 network->ssid_len),
5756 network->bssid,
5757 jiffies_to_msecs(jiffies -
5758 network->last_scanned));
5759 return 0;
5762 if ((priv->config & CFG_STATIC_CHANNEL) &&
5763 (network->channel != priv->channel)) {
5764 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5765 "because of channel mismatch: %d != %d.\n",
5766 print_ssid(ssid, network->ssid,
5767 network->ssid_len),
5768 network->bssid,
5769 network->channel, priv->channel);
5770 return 0;
5773 /* Verify privacy compatibility */
5774 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5775 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5776 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5777 "because of privacy mismatch: %s != %s.\n",
5778 print_ssid(ssid, network->ssid,
5779 network->ssid_len),
5780 network->bssid,
5781 priv->capability & CAP_PRIVACY_ON ? "on" :
5782 "off",
5783 network->capability &
5784 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5785 return 0;
5788 if ((priv->config & CFG_STATIC_BSSID) &&
5789 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5790 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5791 "because of BSSID mismatch: %pM.\n",
5792 print_ssid(ssid, network->ssid,
5793 network->ssid_len),
5794 network->bssid, priv->bssid);
5795 return 0;
5798 /* Filter out any incompatible freq / mode combinations */
5799 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5800 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5801 "because of invalid frequency/mode "
5802 "combination.\n",
5803 print_ssid(ssid, network->ssid,
5804 network->ssid_len),
5805 network->bssid);
5806 return 0;
5809 /* Filter out invalid channel in current GEO */
5810 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5811 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5812 "because of invalid channel in current GEO\n",
5813 print_ssid(ssid, network->ssid,
5814 network->ssid_len),
5815 network->bssid);
5816 return 0;
5819 /* Ensure that the rates supported by the driver are compatible with
5820 * this AP, including verification of basic rates (mandatory) */
5821 if (!ipw_compatible_rates(priv, network, &rates)) {
5822 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5823 "because configured rate mask excludes "
5824 "AP mandatory rate.\n",
5825 print_ssid(ssid, network->ssid,
5826 network->ssid_len),
5827 network->bssid);
5828 return 0;
5831 if (rates.num_rates == 0) {
5832 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5833 "because of no compatible rates.\n",
5834 print_ssid(ssid, network->ssid,
5835 network->ssid_len),
5836 network->bssid);
5837 return 0;
5840 /* TODO: Perform any further minimal comparative tests. We do not
5841 * want to put too much policy logic here; intelligent scan selection
5842 * should occur within a generic IEEE 802.11 user space tool. */
5844 /* Set up 'new' AP to this network */
5845 ipw_copy_rates(&match->rates, &rates);
5846 match->network = network;
5848 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5849 print_ssid(ssid, network->ssid, network->ssid_len),
5850 network->bssid);
5852 return 1;
5855 static void ipw_adhoc_create(struct ipw_priv *priv,
5856 struct ieee80211_network *network)
5858 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5859 int i;
5862 * For the purposes of scanning, we can set our wireless mode
5863 * to trigger scans across combinations of bands, but when it
5864 * comes to creating a new ad-hoc network, we have to tell the FW
5865 * exactly which band to use.
5867 * We also have the possibility of an invalid channel for the
5868 * chosen band. Attempting to create a new ad-hoc network
5869 * with an invalid channel for wireless mode will trigger a
5870 * FW fatal error.
5873 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5874 case IEEE80211_52GHZ_BAND:
5875 network->mode = IEEE_A;
5876 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5877 BUG_ON(i == -1);
5878 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5879 IPW_WARNING("Overriding invalid channel\n");
5880 priv->channel = geo->a[0].channel;
5882 break;
5884 case IEEE80211_24GHZ_BAND:
5885 if (priv->ieee->mode & IEEE_G)
5886 network->mode = IEEE_G;
5887 else
5888 network->mode = IEEE_B;
5889 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5890 BUG_ON(i == -1);
5891 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5892 IPW_WARNING("Overriding invalid channel\n");
5893 priv->channel = geo->bg[0].channel;
5895 break;
5897 default:
5898 IPW_WARNING("Overriding invalid channel\n");
5899 if (priv->ieee->mode & IEEE_A) {
5900 network->mode = IEEE_A;
5901 priv->channel = geo->a[0].channel;
5902 } else if (priv->ieee->mode & IEEE_G) {
5903 network->mode = IEEE_G;
5904 priv->channel = geo->bg[0].channel;
5905 } else {
5906 network->mode = IEEE_B;
5907 priv->channel = geo->bg[0].channel;
5909 break;
5912 network->channel = priv->channel;
5913 priv->config |= CFG_ADHOC_PERSIST;
5914 ipw_create_bssid(priv, network->bssid);
5915 network->ssid_len = priv->essid_len;
5916 memcpy(network->ssid, priv->essid, priv->essid_len);
5917 memset(&network->stats, 0, sizeof(network->stats));
5918 network->capability = WLAN_CAPABILITY_IBSS;
5919 if (!(priv->config & CFG_PREAMBLE_LONG))
5920 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5921 if (priv->capability & CAP_PRIVACY_ON)
5922 network->capability |= WLAN_CAPABILITY_PRIVACY;
5923 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5924 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5925 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5926 memcpy(network->rates_ex,
5927 &priv->rates.supported_rates[network->rates_len],
5928 network->rates_ex_len);
5929 network->last_scanned = 0;
5930 network->flags = 0;
5931 network->last_associate = 0;
5932 network->time_stamp[0] = 0;
5933 network->time_stamp[1] = 0;
5934 network->beacon_interval = 100; /* Default */
5935 network->listen_interval = 10; /* Default */
5936 network->atim_window = 0; /* Default */
5937 network->wpa_ie_len = 0;
5938 network->rsn_ie_len = 0;
5941 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5943 struct ipw_tgi_tx_key key;
5945 if (!(priv->ieee->sec.flags & (1 << index)))
5946 return;
5948 key.key_id = index;
5949 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5950 key.security_type = type;
5951 key.station_index = 0; /* always 0 for BSS */
5952 key.flags = 0;
5953 /* 0 for new key; previous value of counter (after fatal error) */
5954 key.tx_counter[0] = cpu_to_le32(0);
5955 key.tx_counter[1] = cpu_to_le32(0);
5957 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5960 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5962 struct ipw_wep_key key;
5963 int i;
5965 key.cmd_id = DINO_CMD_WEP_KEY;
5966 key.seq_num = 0;
5968 /* Note: AES keys cannot be set multiple times.
5969 * Only set them the first time. */
5970 for (i = 0; i < 4; i++) {
5971 key.key_index = i | type;
5972 if (!(priv->ieee->sec.flags & (1 << i))) {
5973 key.key_size = 0;
5974 continue;
5977 key.key_size = priv->ieee->sec.key_sizes[i];
5978 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5980 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5984 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5986 if (priv->ieee->host_encrypt)
5987 return;
5989 switch (level) {
5990 case SEC_LEVEL_3:
5991 priv->sys_config.disable_unicast_decryption = 0;
5992 priv->ieee->host_decrypt = 0;
5993 break;
5994 case SEC_LEVEL_2:
5995 priv->sys_config.disable_unicast_decryption = 1;
5996 priv->ieee->host_decrypt = 1;
5997 break;
5998 case SEC_LEVEL_1:
5999 priv->sys_config.disable_unicast_decryption = 0;
6000 priv->ieee->host_decrypt = 0;
6001 break;
6002 case SEC_LEVEL_0:
6003 priv->sys_config.disable_unicast_decryption = 1;
6004 break;
6005 default:
6006 break;
6010 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6012 if (priv->ieee->host_encrypt)
6013 return;
6015 switch (level) {
6016 case SEC_LEVEL_3:
6017 priv->sys_config.disable_multicast_decryption = 0;
6018 break;
6019 case SEC_LEVEL_2:
6020 priv->sys_config.disable_multicast_decryption = 1;
6021 break;
6022 case SEC_LEVEL_1:
6023 priv->sys_config.disable_multicast_decryption = 0;
6024 break;
6025 case SEC_LEVEL_0:
6026 priv->sys_config.disable_multicast_decryption = 1;
6027 break;
6028 default:
6029 break;
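/* Push the configured keys to the firmware according to the current
 * security level: CCMP (level 3) loads the active TGi TX key and, when
 * the firmware also handles multicast, the group keys via the WEP key
 * command; TKIP (level 2) loads only the TGi TX key; static WEP
 * (level 1) loads the four WEP key slots and HW decryption flags. */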
6033 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6035 switch (priv->ieee->sec.level) {
6036 case SEC_LEVEL_3:
6037 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6038 ipw_send_tgi_tx_key(priv,
6039 DCT_FLAG_EXT_SECURITY_CCM,
6040 priv->ieee->sec.active_key);
6042 if (!priv->ieee->host_mc_decrypt)
6043 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6044 break;
6045 case SEC_LEVEL_2:
6046 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6047 ipw_send_tgi_tx_key(priv,
6048 DCT_FLAG_EXT_SECURITY_TKIP,
6049 priv->ieee->sec.active_key);
6050 break;
6051 case SEC_LEVEL_1:
6052 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6053 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6054 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6055 break;
6056 case SEC_LEVEL_0:
6057 default:
6058 break;
6062 static void ipw_adhoc_check(void *data)
6064 struct ipw_priv *priv = data;
6066 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6067 !(priv->config & CFG_ADHOC_PERSIST)) {
6068 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6069 IPW_DL_STATE | IPW_DL_ASSOC,
6070 "Missed beacon: %d - disassociate\n",
6071 priv->missed_adhoc_beacons);
6072 ipw_remove_current_network(priv);
6073 ipw_disassociate(priv);
6074 return;
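/* Still within the missed-beacon threshold (or ad-hoc persistence is
 * set): re-arm the check one beacon interval from now. */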
6077 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6078 le16_to_cpu(priv->assoc_request.beacon_interval));
6081 static void ipw_bg_adhoc_check(struct work_struct *work)
6083 struct ipw_priv *priv =
6084 container_of(work, struct ipw_priv, adhoc_check.work);
6085 mutex_lock(&priv->mutex);
6086 ipw_adhoc_check(priv);
6087 mutex_unlock(&priv->mutex);
6090 static void ipw_debug_config(struct ipw_priv *priv)
6092 DECLARE_SSID_BUF(ssid);
6093 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6094 "[CFG 0x%08X]\n", priv->config);
6095 if (priv->config & CFG_STATIC_CHANNEL)
6096 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6097 else
6098 IPW_DEBUG_INFO("Channel unlocked.\n");
6099 if (priv->config & CFG_STATIC_ESSID)
6100 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6101 print_ssid(ssid, priv->essid, priv->essid_len));
6102 else
6103 IPW_DEBUG_INFO("ESSID unlocked.\n");
6104 if (priv->config & CFG_STATIC_BSSID)
6105 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6106 else
6107 IPW_DEBUG_INFO("BSSID unlocked.\n");
6108 if (priv->capability & CAP_PRIVACY_ON)
6109 IPW_DEBUG_INFO("PRIVACY on\n");
6110 else
6111 IPW_DEBUG_INFO("PRIVACY off\n");
6112 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6115 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6117 /* TODO: Verify that this works... */
6118 struct ipw_fixed_rate fr = {
6119 .tx_rates = priv->rates_mask
6121 u32 reg;
6122 u16 mask = 0;
6124 /* Identify 'current FW band' and match it with the fixed
6125 * Tx rates */
6127 switch (priv->ieee->freq_band) {
6128 case IEEE80211_52GHZ_BAND: /* A only */
6129 /* IEEE_A */
6130 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6131 /* Invalid fixed rate mask */
6132 IPW_DEBUG_WX
6133 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6134 fr.tx_rates = 0;
6135 break;
6138 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6139 break;
6141 default: /* 2.4Ghz or Mixed */
6142 /* IEEE_B */
6143 if (mode == IEEE_B) {
6144 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6145 /* Invalid fixed rate mask */
6146 IPW_DEBUG_WX
6147 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6148 fr.tx_rates = 0;
6150 break;
6153 /* IEEE_G */
6154 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6155 IEEE80211_OFDM_RATES_MASK)) {
6156 /* Invalid fixed rate mask */
6157 IPW_DEBUG_WX
6158 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6159 fr.tx_rates = 0;
6160 break;
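/* For 802.11g the 6, 9 and 12 Mb OFDM bits are each moved down one
 * bit position: the bit is cleared from fr.tx_rates and re-added via
 * 'mask' shifted right by one. Presumably this matches the packed
 * CCK+OFDM rate layout the firmware expects in mixed mode. */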
6163 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6164 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6165 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6168 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6169 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6170 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6173 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6174 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6175 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6178 fr.tx_rates |= mask;
6179 break;
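/* IPW_MEM_FIXED_OVERRIDE appears to hold the device address of the
 * fixed-rate override block: read that pointer, then write the packed
 * ipw_fixed_rate structure there as a single 32-bit word. */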
6182 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6183 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6186 static void ipw_abort_scan(struct ipw_priv *priv)
6188 int err;
6190 if (priv->status & STATUS_SCAN_ABORTING) {
6191 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6192 return;
6194 priv->status |= STATUS_SCAN_ABORTING;
6196 err = ipw_send_scan_abort(priv);
6197 if (err)
6198 IPW_DEBUG_HC("Request to abort scan failed.\n");
6201 static void ipw_add_scan_channels(struct ipw_priv *priv,
6202 struct ipw_scan_request_ext *scan,
6203 int scan_type)
6205 int channel_index = 0;
6206 const struct ieee80211_geo *geo;
6207 int i;
6209 geo = ieee80211_get_geo(priv->ieee);
6211 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6212 int start = channel_index;
6213 for (i = 0; i < geo->a_channels; i++) {
6214 if ((priv->status & STATUS_ASSOCIATED) &&
6215 geo->a[i].channel == priv->channel)
6216 continue;
6217 channel_index++;
6218 scan->channels_list[channel_index] = geo->a[i].channel;
6219 ipw_set_scan_type(scan, channel_index,
6220 geo->a[i].
6221 flags & IEEE80211_CH_PASSIVE_ONLY ?
6222 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6223 scan_type);
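/* The first slot of each band's run in channels_list is a header
 * byte: the band id (IPW_A_MODE here) in the two high bits and the
 * number of channels that follow in the low bits. */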
6226 if (start != channel_index) {
6227 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6228 (channel_index - start);
6229 channel_index++;
6233 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6234 int start = channel_index;
6235 if (priv->config & CFG_SPEED_SCAN) {
6236 int index;
6237 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6238 /* nop out the list */
6239 [0] = 0
6242 u8 channel;
6243 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6244 channel =
6245 priv->speed_scan[priv->speed_scan_pos];
6246 if (channel == 0) {
6247 priv->speed_scan_pos = 0;
6248 channel = priv->speed_scan[0];
6250 if ((priv->status & STATUS_ASSOCIATED) &&
6251 channel == priv->channel) {
6252 priv->speed_scan_pos++;
6253 continue;
6256 /* If this channel has already been
6257 * added to the scan, break out of the loop;
6258 * it will be the first channel
6259 * in the next scan.
6261 if (channels[channel - 1] != 0)
6262 break;
6264 channels[channel - 1] = 1;
6265 priv->speed_scan_pos++;
6266 channel_index++;
6267 scan->channels_list[channel_index] = channel;
6268 index =
6269 ieee80211_channel_to_index(priv->ieee, channel);
6270 ipw_set_scan_type(scan, channel_index,
6271 geo->bg[index].
6272 flags &
6273 IEEE80211_CH_PASSIVE_ONLY ?
6274 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6275 : scan_type);
6277 } else {
6278 for (i = 0; i < geo->bg_channels; i++) {
6279 if ((priv->status & STATUS_ASSOCIATED) &&
6280 geo->bg[i].channel == priv->channel)
6281 continue;
6282 channel_index++;
6283 scan->channels_list[channel_index] =
6284 geo->bg[i].channel;
6285 ipw_set_scan_type(scan, channel_index,
6286 geo->bg[i].
6287 flags &
6288 IEEE80211_CH_PASSIVE_ONLY ?
6289 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6290 : scan_type);
6294 if (start != channel_index) {
6295 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6296 (channel_index - start);
6301 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6303 /* staying on passive channels longer than the DTIM interval during a
6304 * scan, while associated, causes the firmware to cancel the scan
6305 * without notification. Hence, don't stay on passive channels longer
6306 * than the beacon interval.
6308 if (priv->status & STATUS_ASSOCIATED
6309 && priv->assoc_network->beacon_interval > 10)
6310 return priv->assoc_network->beacon_interval - 10;
6311 else
6312 return 120;
6315 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6317 struct ipw_scan_request_ext scan;
6318 int err = 0, scan_type;
6320 if (!(priv->status & STATUS_INIT) ||
6321 (priv->status & STATUS_EXIT_PENDING))
6322 return 0;
6324 mutex_lock(&priv->mutex);
6326 if (direct && (priv->direct_scan_ssid_len == 0)) {
6327 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6328 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6329 goto done;
6332 if (priv->status & STATUS_SCANNING) {
6333 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6334 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6335 STATUS_SCAN_PENDING;
6336 goto done;
6339 if (!(priv->status & STATUS_SCAN_FORCED) &&
6340 priv->status & STATUS_SCAN_ABORTING) {
6341 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6342 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6343 STATUS_SCAN_PENDING;
6344 goto done;
6347 if (priv->status & STATUS_RF_KILL_MASK) {
6348 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6349 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6350 STATUS_SCAN_PENDING;
6351 goto done;
6354 memset(&scan, 0, sizeof(scan));
6355 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6357 if (type == IW_SCAN_TYPE_PASSIVE) {
6358 IPW_DEBUG_WX("use passive scanning\n");
6359 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6360 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6361 cpu_to_le16(ipw_passive_dwell_time(priv));
6362 ipw_add_scan_channels(priv, &scan, scan_type);
6363 goto send_request;
6366 /* Use active scan by default. */
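/* Active broadcast probes dwell 30 per channel when speed scan is
 * enabled and 20 otherwise; directed probes dwell 20, and any passive
 * slots fall back to ipw_passive_dwell_time(). */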
6367 if (priv->config & CFG_SPEED_SCAN)
6368 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6369 cpu_to_le16(30);
6370 else
6371 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6372 cpu_to_le16(20);
6374 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6375 cpu_to_le16(20);
6377 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6378 cpu_to_le16(ipw_passive_dwell_time(priv));
6379 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6381 #ifdef CONFIG_IPW2200_MONITOR
6382 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6383 u8 channel;
6384 u8 band = 0;
6386 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6387 case IEEE80211_52GHZ_BAND:
6388 band = (u8) (IPW_A_MODE << 6) | 1;
6389 channel = priv->channel;
6390 break;
6392 case IEEE80211_24GHZ_BAND:
6393 band = (u8) (IPW_B_MODE << 6) | 1;
6394 channel = priv->channel;
6395 break;
6397 default:
6398 band = (u8) (IPW_B_MODE << 6) | 1;
6399 channel = 9;
6400 break;
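/* Monitor mode scans exactly one channel: channels_list[0] is the
 * band header byte with a count of one and channels_list[1] is the
 * channel itself. */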
6403 scan.channels_list[0] = band;
6404 scan.channels_list[1] = channel;
6405 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6407 /* NOTE: The card will sit on this channel for this time
6408 * period. Scan aborts are timing sensitive and frequently
6409 * result in firmware restarts. As such, it is best to
6410 * set a small dwell_time here and just keep re-issuing
6411 * scans. Otherwise fast channel hopping will not actually
6412 * hop channels.
6414 * TODO: Move SPEED SCAN support to all modes and bands */
6415 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6416 cpu_to_le16(2000);
6417 } else {
6418 #endif /* CONFIG_IPW2200_MONITOR */
6419 /* Honor direct scans first, otherwise if we are roaming make
6420 * this a direct scan for the current network. Finally,
6421 * ensure that every other scan is a fast channel hop scan */
6422 if (direct) {
6423 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6424 priv->direct_scan_ssid_len);
6425 if (err) {
6426 IPW_DEBUG_HC("Attempt to send SSID command "
6427 "failed\n");
6428 goto done;
6431 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6432 } else if ((priv->status & STATUS_ROAMING)
6433 || (!(priv->status & STATUS_ASSOCIATED)
6434 && (priv->config & CFG_STATIC_ESSID)
6435 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6436 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6437 if (err) {
6438 IPW_DEBUG_HC("Attempt to send SSID command "
6439 "failed.\n");
6440 goto done;
6443 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6444 } else
6445 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6447 ipw_add_scan_channels(priv, &scan, scan_type);
6448 #ifdef CONFIG_IPW2200_MONITOR
6450 #endif
6452 send_request:
6453 err = ipw_send_scan_request_ext(priv, &scan);
6454 if (err) {
6455 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6456 goto done;
6459 priv->status |= STATUS_SCANNING;
6460 if (direct) {
6461 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6462 priv->direct_scan_ssid_len = 0;
6463 } else
6464 priv->status &= ~STATUS_SCAN_PENDING;
6466 queue_delayed_work(priv->workqueue, &priv->scan_check,
6467 IPW_SCAN_CHECK_WATCHDOG);
6468 done:
6469 mutex_unlock(&priv->mutex);
6470 return err;
6473 static void ipw_request_passive_scan(struct work_struct *work)
6475 struct ipw_priv *priv =
6476 container_of(work, struct ipw_priv, request_passive_scan.work);
6477 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6480 static void ipw_request_scan(struct work_struct *work)
6482 struct ipw_priv *priv =
6483 container_of(work, struct ipw_priv, request_scan.work);
6484 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6487 static void ipw_request_direct_scan(struct work_struct *work)
6489 struct ipw_priv *priv =
6490 container_of(work, struct ipw_priv, request_direct_scan.work);
6491 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6494 static void ipw_bg_abort_scan(struct work_struct *work)
6496 struct ipw_priv *priv =
6497 container_of(work, struct ipw_priv, abort_scan);
6498 mutex_lock(&priv->mutex);
6499 ipw_abort_scan(priv);
6500 mutex_unlock(&priv->mutex);
6503 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6505 /* This is called when wpa_supplicant loads and closes the driver
6506 * interface. */
6507 priv->ieee->wpa_enabled = value;
6508 return 0;
6511 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6513 struct ieee80211_device *ieee = priv->ieee;
6514 struct ieee80211_security sec = {
6515 .flags = SEC_AUTH_MODE,
6517 int ret = 0;
6519 if (value & IW_AUTH_ALG_SHARED_KEY) {
6520 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6521 ieee->open_wep = 0;
6522 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6523 sec.auth_mode = WLAN_AUTH_OPEN;
6524 ieee->open_wep = 1;
6525 } else if (value & IW_AUTH_ALG_LEAP) {
6526 sec.auth_mode = WLAN_AUTH_LEAP;
6527 ieee->open_wep = 1;
6528 } else
6529 return -EINVAL;
6531 if (ieee->set_security)
6532 ieee->set_security(ieee->dev, &sec);
6533 else
6534 ret = -EOPNOTSUPP;
6536 return ret;
6539 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6540 int wpa_ie_len)
6542 /* make sure WPA is enabled */
6543 ipw_wpa_enable(priv, 1);
6546 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6547 char *capabilities, int length)
6549 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6551 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6552 capabilities);
6556 * WE-18 support
6559 /* SIOCSIWGENIE */
6560 static int ipw_wx_set_genie(struct net_device *dev,
6561 struct iw_request_info *info,
6562 union iwreq_data *wrqu, char *extra)
6564 struct ipw_priv *priv = ieee80211_priv(dev);
6565 struct ieee80211_device *ieee = priv->ieee;
6566 u8 *buf;
6567 int err = 0;
6569 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6570 (wrqu->data.length && extra == NULL))
6571 return -EINVAL;
6573 if (wrqu->data.length) {
6574 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6575 if (buf == NULL) {
6576 err = -ENOMEM;
6577 goto out;
6580 memcpy(buf, extra, wrqu->data.length);
6581 kfree(ieee->wpa_ie);
6582 ieee->wpa_ie = buf;
6583 ieee->wpa_ie_len = wrqu->data.length;
6584 } else {
6585 kfree(ieee->wpa_ie);
6586 ieee->wpa_ie = NULL;
6587 ieee->wpa_ie_len = 0;
6590 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6591 out:
6592 return err;
6595 /* SIOCGIWGENIE */
6596 static int ipw_wx_get_genie(struct net_device *dev,
6597 struct iw_request_info *info,
6598 union iwreq_data *wrqu, char *extra)
6600 struct ipw_priv *priv = ieee80211_priv(dev);
6601 struct ieee80211_device *ieee = priv->ieee;
6602 int err = 0;
6604 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6605 wrqu->data.length = 0;
6606 goto out;
6609 if (wrqu->data.length < ieee->wpa_ie_len) {
6610 err = -E2BIG;
6611 goto out;
6614 wrqu->data.length = ieee->wpa_ie_len;
6615 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6617 out:
6618 return err;
6621 static int wext_cipher2level(int cipher)
6623 switch (cipher) {
6624 case IW_AUTH_CIPHER_NONE:
6625 return SEC_LEVEL_0;
6626 case IW_AUTH_CIPHER_WEP40:
6627 case IW_AUTH_CIPHER_WEP104:
6628 return SEC_LEVEL_1;
6629 case IW_AUTH_CIPHER_TKIP:
6630 return SEC_LEVEL_2;
6631 case IW_AUTH_CIPHER_CCMP:
6632 return SEC_LEVEL_3;
6633 default:
6634 return -1;
6638 /* SIOCSIWAUTH */
6639 static int ipw_wx_set_auth(struct net_device *dev,
6640 struct iw_request_info *info,
6641 union iwreq_data *wrqu, char *extra)
6643 struct ipw_priv *priv = ieee80211_priv(dev);
6644 struct ieee80211_device *ieee = priv->ieee;
6645 struct iw_param *param = &wrqu->param;
6646 struct lib80211_crypt_data *crypt;
6647 unsigned long flags;
6648 int ret = 0;
6650 switch (param->flags & IW_AUTH_INDEX) {
6651 case IW_AUTH_WPA_VERSION:
6652 break;
6653 case IW_AUTH_CIPHER_PAIRWISE:
6654 ipw_set_hw_decrypt_unicast(priv,
6655 wext_cipher2level(param->value));
6656 break;
6657 case IW_AUTH_CIPHER_GROUP:
6658 ipw_set_hw_decrypt_multicast(priv,
6659 wext_cipher2level(param->value));
6660 break;
6661 case IW_AUTH_KEY_MGMT:
6663 * ipw2200 does not use these parameters
6665 break;
6667 case IW_AUTH_TKIP_COUNTERMEASURES:
6668 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6669 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6670 break;
6672 flags = crypt->ops->get_flags(crypt->priv);
6674 if (param->value)
6675 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6676 else
6677 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6679 crypt->ops->set_flags(flags, crypt->priv);
6681 break;
6683 case IW_AUTH_DROP_UNENCRYPTED:{
6684 /* HACK:
6686 * wpa_supplicant calls set_wpa_enabled when the driver
6687 * is loaded and unloaded, regardless of whether WPA is being
6688 * used. No other calls are made before association is expected
6689 * that could be used to determine whether encryption will be
6690 * used. If encryption is not being
6691 * used, drop_unencrypted is set to false, else true -- we
6692 * can use this to determine if the CAP_PRIVACY_ON bit should
6693 * be set.
6695 struct ieee80211_security sec = {
6696 .flags = SEC_ENABLED,
6697 .enabled = param->value,
6699 priv->ieee->drop_unencrypted = param->value;
6700 /* We only change SEC_LEVEL for open mode. Others
6701 * are set by ipw_wpa_set_encryption.
6703 if (!param->value) {
6704 sec.flags |= SEC_LEVEL;
6705 sec.level = SEC_LEVEL_0;
6706 } else {
6707 sec.flags |= SEC_LEVEL;
6708 sec.level = SEC_LEVEL_1;
6710 if (priv->ieee->set_security)
6711 priv->ieee->set_security(priv->ieee->dev, &sec);
6712 break;
6715 case IW_AUTH_80211_AUTH_ALG:
6716 ret = ipw_wpa_set_auth_algs(priv, param->value);
6717 break;
6719 case IW_AUTH_WPA_ENABLED:
6720 ret = ipw_wpa_enable(priv, param->value);
6721 ipw_disassociate(priv);
6722 break;
6724 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6725 ieee->ieee802_1x = param->value;
6726 break;
6728 case IW_AUTH_PRIVACY_INVOKED:
6729 ieee->privacy_invoked = param->value;
6730 break;
6732 default:
6733 return -EOPNOTSUPP;
6735 return ret;
6738 /* SIOCGIWAUTH */
6739 static int ipw_wx_get_auth(struct net_device *dev,
6740 struct iw_request_info *info,
6741 union iwreq_data *wrqu, char *extra)
6743 struct ipw_priv *priv = ieee80211_priv(dev);
6744 struct ieee80211_device *ieee = priv->ieee;
6745 struct lib80211_crypt_data *crypt;
6746 struct iw_param *param = &wrqu->param;
6747 int ret = 0;
6749 switch (param->flags & IW_AUTH_INDEX) {
6750 case IW_AUTH_WPA_VERSION:
6751 case IW_AUTH_CIPHER_PAIRWISE:
6752 case IW_AUTH_CIPHER_GROUP:
6753 case IW_AUTH_KEY_MGMT:
6755 * wpa_supplicant will control these internally
6757 ret = -EOPNOTSUPP;
6758 break;
6760 case IW_AUTH_TKIP_COUNTERMEASURES:
6761 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6762 if (!crypt || !crypt->ops->get_flags)
6763 break;
6765 param->value = (crypt->ops->get_flags(crypt->priv) &
6766 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6768 break;
6770 case IW_AUTH_DROP_UNENCRYPTED:
6771 param->value = ieee->drop_unencrypted;
6772 break;
6774 case IW_AUTH_80211_AUTH_ALG:
6775 param->value = ieee->sec.auth_mode;
6776 break;
6778 case IW_AUTH_WPA_ENABLED:
6779 param->value = ieee->wpa_enabled;
6780 break;
6782 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6783 param->value = ieee->ieee802_1x;
6784 break;
6786 case IW_AUTH_ROAMING_CONTROL:
6787 case IW_AUTH_PRIVACY_INVOKED:
6788 param->value = ieee->privacy_invoked;
6789 break;
6791 default:
6792 return -EOPNOTSUPP;
6794 return 0;
6797 /* SIOCSIWENCODEEXT */
6798 static int ipw_wx_set_encodeext(struct net_device *dev,
6799 struct iw_request_info *info,
6800 union iwreq_data *wrqu, char *extra)
6802 struct ipw_priv *priv = ieee80211_priv(dev);
6803 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6805 if (hwcrypto) {
6806 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6807 /* IPW HW can't build TKIP MIC,
6808 host decryption still needed */
6809 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6810 priv->ieee->host_mc_decrypt = 1;
6811 else {
6812 priv->ieee->host_encrypt = 0;
6813 priv->ieee->host_encrypt_msdu = 1;
6814 priv->ieee->host_decrypt = 1;
6816 } else {
6817 priv->ieee->host_encrypt = 0;
6818 priv->ieee->host_encrypt_msdu = 0;
6819 priv->ieee->host_decrypt = 0;
6820 priv->ieee->host_mc_decrypt = 0;
6824 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6827 /* SIOCGIWENCODEEXT */
6828 static int ipw_wx_get_encodeext(struct net_device *dev,
6829 struct iw_request_info *info,
6830 union iwreq_data *wrqu, char *extra)
6832 struct ipw_priv *priv = ieee80211_priv(dev);
6833 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6836 /* SIOCSIWMLME */
6837 static int ipw_wx_set_mlme(struct net_device *dev,
6838 struct iw_request_info *info,
6839 union iwreq_data *wrqu, char *extra)
6841 struct ipw_priv *priv = ieee80211_priv(dev);
6842 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6843 __le16 reason;
6845 reason = cpu_to_le16(mlme->reason_code);
6847 switch (mlme->cmd) {
6848 case IW_MLME_DEAUTH:
6849 /* silently ignore */
6850 break;
6852 case IW_MLME_DISASSOC:
6853 ipw_disassociate(priv);
6854 break;
6856 default:
6857 return -EOPNOTSUPP;
6859 return 0;
6862 #ifdef CONFIG_IPW2200_QOS
6864 /* QoS */
6866 * get the wireless mode of the current network or
6867 * the card's current mode
6869 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6871 u8 mode = 0;
6873 if (priv->status & STATUS_ASSOCIATED) {
6874 unsigned long flags;
6876 spin_lock_irqsave(&priv->ieee->lock, flags);
6877 mode = priv->assoc_network->mode;
6878 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6879 } else {
6880 mode = priv->ieee->mode;
6882 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6883 return mode;
6887 * Handle management frame beacon and probe response
6889 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6890 int active_network,
6891 struct ieee80211_network *network)
6893 u32 size = sizeof(struct ieee80211_qos_parameters);
6895 if (network->capability & WLAN_CAPABILITY_IBSS)
6896 network->qos_data.active = network->qos_data.supported;
6898 if (network->flags & NETWORK_HAS_QOS_MASK) {
6899 if (active_network &&
6900 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6901 network->qos_data.active = network->qos_data.supported;
6903 if ((network->qos_data.active == 1) && (active_network == 1) &&
6904 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6905 (network->qos_data.old_param_count !=
6906 network->qos_data.param_count)) {
6907 network->qos_data.old_param_count =
6908 network->qos_data.param_count;
6909 schedule_work(&priv->qos_activate);
6910 IPW_DEBUG_QOS("QoS parameters changed; calling "
6911 "qos_activate\n");
6913 } else {
6914 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6915 memcpy(&network->qos_data.parameters,
6916 &def_parameters_CCK, size);
6917 else
6918 memcpy(&network->qos_data.parameters,
6919 &def_parameters_OFDM, size);
6921 if ((network->qos_data.active == 1) && (active_network == 1)) {
6922 IPW_DEBUG_QOS("QoS was disabled; calling qos_activate\n");
6923 schedule_work(&priv->qos_activate);
6926 network->qos_data.active = 0;
6927 network->qos_data.supported = 0;
6929 if ((priv->status & STATUS_ASSOCIATED) &&
6930 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6931 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6932 if (network->capability & WLAN_CAPABILITY_IBSS)
6933 if ((network->ssid_len ==
6934 priv->assoc_network->ssid_len) &&
6935 !memcmp(network->ssid,
6936 priv->assoc_network->ssid,
6937 network->ssid_len)) {
6938 queue_work(priv->workqueue,
6939 &priv->merge_networks);
6943 return 0;
6947 * This function sets up the firmware to support QoS. It sends
6948 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6950 static int ipw_qos_activate(struct ipw_priv *priv,
6951 struct ieee80211_qos_data *qos_network_data)
6953 int err;
6954 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6955 struct ieee80211_qos_parameters *active_one = NULL;
6956 u32 size = sizeof(struct ieee80211_qos_parameters);
6957 u32 burst_duration;
6958 int i;
6959 u8 type;
6961 type = ipw_qos_current_mode(priv);
6963 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6964 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6965 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6966 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6968 if (qos_network_data == NULL) {
6969 if (type == IEEE_B) {
6970 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6971 active_one = &def_parameters_CCK;
6972 } else
6973 active_one = &def_parameters_OFDM;
6975 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6976 burst_duration = ipw_qos_get_burst_duration(priv);
6977 for (i = 0; i < QOS_QUEUE_NUM; i++)
6978 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6979 cpu_to_le16(burst_duration);
6980 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6981 if (type == IEEE_B) {
6982 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6983 type);
6984 if (priv->qos_data.qos_enable == 0)
6985 active_one = &def_parameters_CCK;
6986 else
6987 active_one = priv->qos_data.def_qos_parm_CCK;
6988 } else {
6989 if (priv->qos_data.qos_enable == 0)
6990 active_one = &def_parameters_OFDM;
6991 else
6992 active_one = priv->qos_data.def_qos_parm_OFDM;
6994 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6995 } else {
6996 unsigned long flags;
6997 int active;
6999 spin_lock_irqsave(&priv->ieee->lock, flags);
7000 active_one = &(qos_network_data->parameters);
7001 qos_network_data->old_param_count =
7002 qos_network_data->param_count;
7003 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7004 active = qos_network_data->supported;
7005 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7007 if (active == 0) {
7008 burst_duration = ipw_qos_get_burst_duration(priv);
7009 for (i = 0; i < QOS_QUEUE_NUM; i++)
7010 qos_parameters[QOS_PARAM_SET_ACTIVE].
7011 tx_op_limit[i] = cpu_to_le16(burst_duration);
7015 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7016 err = ipw_send_qos_params_command(priv,
7017 (struct ieee80211_qos_parameters *)
7018 &(qos_parameters[0]));
7019 if (err)
7020 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7022 return err;
7026 * send IPW_CMD_WME_INFO to the firmware
7028 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7030 int ret = 0;
7031 struct ieee80211_qos_information_element qos_info;
7033 if (priv == NULL)
7034 return -1;
7036 qos_info.elementID = QOS_ELEMENT_ID;
7037 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
7039 qos_info.version = QOS_VERSION_1;
7040 qos_info.ac_info = 0;
7042 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7043 qos_info.qui_type = QOS_OUI_TYPE;
7044 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7046 ret = ipw_send_qos_info_command(priv, &qos_info);
7047 if (ret != 0) {
7048 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7050 return ret;
7054 * Set the QoS parameter with the association request structure
7056 static int ipw_qos_association(struct ipw_priv *priv,
7057 struct ieee80211_network *network)
7059 int err = 0;
7060 struct ieee80211_qos_data *qos_data = NULL;
7061 struct ieee80211_qos_data ibss_data = {
7062 .supported = 1,
7063 .active = 1,
7066 switch (priv->ieee->iw_mode) {
7067 case IW_MODE_ADHOC:
7068 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7070 qos_data = &ibss_data;
7071 break;
7073 case IW_MODE_INFRA:
7074 qos_data = &network->qos_data;
7075 break;
7077 default:
7078 BUG();
7079 break;
7082 err = ipw_qos_activate(priv, qos_data);
7083 if (err) {
7084 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7085 return err;
7088 if (priv->qos_data.qos_enable && qos_data->supported) {
7089 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7090 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7091 return ipw_qos_set_info_element(priv);
7094 return 0;
7098 * Handle beacon responses. If the QoS settings advertised by the
7099 * network differ from those recorded at association time, adjust
7100 * the QoS settings.
7102 static int ipw_qos_association_resp(struct ipw_priv *priv,
7103 struct ieee80211_network *network)
7105 int ret = 0;
7106 unsigned long flags;
7107 u32 size = sizeof(struct ieee80211_qos_parameters);
7108 int set_qos_param = 0;
7110 if ((priv == NULL) || (network == NULL) ||
7111 (priv->assoc_network == NULL))
7112 return ret;
7114 if (!(priv->status & STATUS_ASSOCIATED))
7115 return ret;
7117 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7118 return ret;
7120 spin_lock_irqsave(&priv->ieee->lock, flags);
7121 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7122 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7123 sizeof(struct ieee80211_qos_data));
7124 priv->assoc_network->qos_data.active = 1;
7125 if ((network->qos_data.old_param_count !=
7126 network->qos_data.param_count)) {
7127 set_qos_param = 1;
7128 network->qos_data.old_param_count =
7129 network->qos_data.param_count;
7132 } else {
7133 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7134 memcpy(&priv->assoc_network->qos_data.parameters,
7135 &def_parameters_CCK, size);
7136 else
7137 memcpy(&priv->assoc_network->qos_data.parameters,
7138 &def_parameters_OFDM, size);
7139 priv->assoc_network->qos_data.active = 0;
7140 priv->assoc_network->qos_data.supported = 0;
7141 set_qos_param = 1;
7144 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7146 if (set_qos_param == 1)
7147 schedule_work(&priv->qos_activate);
7149 return ret;
7152 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7154 u32 ret = 0;
7156 if (priv == NULL)
7157 return 0;
7159 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7160 ret = priv->qos_data.burst_duration_CCK;
7161 else
7162 ret = priv->qos_data.burst_duration_OFDM;
7164 return ret;
7168 * Initialize the global QoS settings
7170 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7171 int burst_enable, u32 burst_duration_CCK,
7172 u32 burst_duration_OFDM)
7174 priv->qos_data.qos_enable = enable;
7176 if (priv->qos_data.qos_enable) {
7177 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7178 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7179 IPW_DEBUG_QOS("QoS is enabled\n");
7180 } else {
7181 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7182 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7183 IPW_DEBUG_QOS("QoS is not enabled\n");
7186 priv->qos_data.burst_enable = burst_enable;
7188 if (burst_enable) {
7189 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7190 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7191 } else {
7192 priv->qos_data.burst_duration_CCK = 0;
7193 priv->qos_data.burst_duration_OFDM = 0;
7198 * map the packet priority to the right TX Queue
7200 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7202 if (priority > 7 || !priv->qos_data.qos_enable)
7203 priority = 0;
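/* from_priority_to_tx_queue[] is 1-based; subtract one to get the
 * zero-based firmware TX queue index. */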
7205 return from_priority_to_tx_queue[priority] - 1;
7208 static int ipw_is_qos_active(struct net_device *dev,
7209 struct sk_buff *skb)
7211 struct ipw_priv *priv = ieee80211_priv(dev);
7212 struct ieee80211_qos_data *qos_data = NULL;
7213 int active, supported;
7214 u8 *daddr = skb->data + ETH_ALEN;
7215 int unicast = !is_multicast_ether_addr(daddr);
7217 if (!(priv->status & STATUS_ASSOCIATED))
7218 return 0;
7220 qos_data = &priv->assoc_network->qos_data;
7222 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7223 if (unicast == 0)
7224 qos_data->active = 0;
7225 else
7226 qos_data->active = qos_data->supported;
7228 active = qos_data->active;
7229 supported = qos_data->supported;
7230 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7231 "unicast %d\n",
7232 priv->qos_data.qos_enable, active, supported, unicast);
7233 if (active && priv->qos_data.qos_enable)
7234 return 1;
7236 return 0;
7240 * add QoS parameter to the TX command
7242 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7243 u16 priority,
7244 struct tfd_data *tfd)
7246 int tx_queue_id = 0;
7249 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7250 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
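/* If this queue is covered by qos_no_ack_mask, clear the ACK-required
 * flag on the TFD and mark the QoS control field of the MAC header
 * with the no-ACK policy. */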
7252 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7253 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7254 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7256 return 0;
7260 * background support to run QoS activate functionality
7262 static void ipw_bg_qos_activate(struct work_struct *work)
7264 struct ipw_priv *priv =
7265 container_of(work, struct ipw_priv, qos_activate);
7267 if (priv == NULL)
7268 return;
7270 mutex_lock(&priv->mutex);
7272 if (priv->status & STATUS_ASSOCIATED)
7273 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7275 mutex_unlock(&priv->mutex);
7278 static int ipw_handle_probe_response(struct net_device *dev,
7279 struct ieee80211_probe_response *resp,
7280 struct ieee80211_network *network)
7282 struct ipw_priv *priv = ieee80211_priv(dev);
7283 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7284 (network == priv->assoc_network));
7286 ipw_qos_handle_probe_response(priv, active_network, network);
7288 return 0;
7291 static int ipw_handle_beacon(struct net_device *dev,
7292 struct ieee80211_beacon *resp,
7293 struct ieee80211_network *network)
7295 struct ipw_priv *priv = ieee80211_priv(dev);
7296 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7297 (network == priv->assoc_network));
7299 ipw_qos_handle_probe_response(priv, active_network, network);
7301 return 0;
7304 static int ipw_handle_assoc_response(struct net_device *dev,
7305 struct ieee80211_assoc_response *resp,
7306 struct ieee80211_network *network)
7308 struct ipw_priv *priv = ieee80211_priv(dev);
7309 ipw_qos_association_resp(priv, network);
7310 return 0;
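/* The QoS parameters command carries three parameter sets back to
 * back (hence sizeof(*qos_param) * 3): the CCK defaults, the OFDM
 * defaults and the currently active set, matching the QOS_PARAM_SET_*
 * layout filled in by ipw_qos_activate(). */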
7313 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7314 *qos_param)
7316 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7317 sizeof(*qos_param) * 3, qos_param);
7320 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7321 *qos_param)
7323 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7324 qos_param);
7327 #endif /* CONFIG_IPW2200_QOS */
7329 static int ipw_associate_network(struct ipw_priv *priv,
7330 struct ieee80211_network *network,
7331 struct ipw_supported_rates *rates, int roaming)
7333 int err;
7334 DECLARE_SSID_BUF(ssid);
7336 if (priv->config & CFG_FIXED_RATE)
7337 ipw_set_fixed_rate(priv, network->mode);
7339 if (!(priv->config & CFG_STATIC_ESSID)) {
7340 priv->essid_len = min(network->ssid_len,
7341 (u8) IW_ESSID_MAX_SIZE);
7342 memcpy(priv->essid, network->ssid, priv->essid_len);
7345 network->last_associate = jiffies;
7347 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7348 priv->assoc_request.channel = network->channel;
7349 priv->assoc_request.auth_key = 0;
7351 if ((priv->capability & CAP_PRIVACY_ON) &&
7352 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7353 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7354 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7356 if (priv->ieee->sec.level == SEC_LEVEL_1)
7357 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7359 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7360 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7361 priv->assoc_request.auth_type = AUTH_LEAP;
7362 else
7363 priv->assoc_request.auth_type = AUTH_OPEN;
7365 if (priv->ieee->wpa_ie_len) {
7366 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7367 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7368 priv->ieee->wpa_ie_len);
7372 * It is valid for our ieee device to support multiple modes, but
7373 * when it comes to associating to a given network we have to choose
7374 * just one mode.
7376 if (network->mode & priv->ieee->mode & IEEE_A)
7377 priv->assoc_request.ieee_mode = IPW_A_MODE;
7378 else if (network->mode & priv->ieee->mode & IEEE_G)
7379 priv->assoc_request.ieee_mode = IPW_G_MODE;
7380 else if (network->mode & priv->ieee->mode & IEEE_B)
7381 priv->assoc_request.ieee_mode = IPW_B_MODE;
7383 priv->assoc_request.capability = cpu_to_le16(network->capability);
7384 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7385 && !(priv->config & CFG_PREAMBLE_LONG)) {
7386 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7387 } else {
7388 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7390 /* Clear the short preamble if we won't be supporting it */
7391 priv->assoc_request.capability &=
7392 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7395 /* Clear capability bits that aren't used in Ad Hoc */
7396 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7397 priv->assoc_request.capability &=
7398 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7400 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7401 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7402 roaming ? "Rea" : "A",
7403 print_ssid(ssid, priv->essid, priv->essid_len),
7404 network->channel,
7405 ipw_modes[priv->assoc_request.ieee_mode],
7406 rates->num_rates,
7407 (priv->assoc_request.preamble_length ==
7408 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7409 network->capability &
7410 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7411 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7412 priv->capability & CAP_PRIVACY_ON ?
7413 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7414 "(open)") : "",
7415 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7416 priv->capability & CAP_PRIVACY_ON ?
7417 '1' + priv->ieee->sec.active_key : '.',
7418 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7420 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7421 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7422 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7423 priv->assoc_request.assoc_type = HC_IBSS_START;
7424 priv->assoc_request.assoc_tsf_msw = 0;
7425 priv->assoc_request.assoc_tsf_lsw = 0;
7426 } else {
7427 if (unlikely(roaming))
7428 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7429 else
7430 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7431 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7432 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7435 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7437 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7438 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7439 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7440 } else {
7441 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7442 priv->assoc_request.atim_window = 0;
7445 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7447 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7448 if (err) {
7449 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7450 return err;
7453 rates->ieee_mode = priv->assoc_request.ieee_mode;
7454 rates->purpose = IPW_RATE_CONNECT;
7455 ipw_send_supported_rates(priv, rates);
7457 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7458 priv->sys_config.dot11g_auto_detection = 1;
7459 else
7460 priv->sys_config.dot11g_auto_detection = 0;
7462 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7463 priv->sys_config.answer_broadcast_ssid_probe = 1;
7464 else
7465 priv->sys_config.answer_broadcast_ssid_probe = 0;
7467 err = ipw_send_system_config(priv);
7468 if (err) {
7469 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7470 return err;
7473 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7474 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7475 if (err) {
7476 IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
7477 return err;
7481 * If preemption is enabled, it is possible for the association
7482 * to complete before we return from ipw_send_associate. Therefore
7483 * we have to be sure to update our private data first.
7485 priv->channel = network->channel;
7486 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7487 priv->status |= STATUS_ASSOCIATING;
7488 priv->status &= ~STATUS_SECURITY_UPDATED;
7490 priv->assoc_network = network;
7492 #ifdef CONFIG_IPW2200_QOS
7493 ipw_qos_association(priv, network);
7494 #endif
7496 err = ipw_send_associate(priv, &priv->assoc_request);
7497 if (err) {
7498 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7499 return err;
7502 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7503 print_ssid(ssid, priv->essid, priv->essid_len),
7504 priv->bssid);
7506 return 0;
7509 static void ipw_roam(void *data)
7511 struct ipw_priv *priv = data;
7512 struct ieee80211_network *network = NULL;
7513 struct ipw_network_match match = {
7514 .network = priv->assoc_network
7517 /* The roaming process is as follows:
7519 * 1. Missed beacon threshold triggers the roaming process by
7520 * setting the status ROAM bit and requesting a scan.
7521 * 2. When the scan completes, it schedules the ROAM work
7522 * 3. The ROAM work looks at all of the known networks for one that
7523 * is a better network than the currently associated. If none
7524 * found, the ROAM process is over (ROAM bit cleared)
7525 * 4. If a better network is found, a disassociation request is
7526 * sent.
7527 * 5. When the disassociation completes, the roam work is again
7528 * scheduled. The second time through, the driver is no longer
7529 * associated, and the newly selected network is sent an
7530 * association request.
7531 * 6. At this point, the roaming process is complete and the ROAM
7532 * status bit is cleared.
7535 /* If we are no longer associated, and the roaming bit is no longer
7536 * set, then we are not actively roaming, so just return */
7537 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7538 return;
7540 if (priv->status & STATUS_ASSOCIATED) {
7541 /* First pass through ROAM process -- look for a better
7542 * network */
7543 unsigned long flags;
7544 u8 rssi = priv->assoc_network->stats.rssi;
7545 priv->assoc_network->stats.rssi = -128;
7546 spin_lock_irqsave(&priv->ieee->lock, flags);
7547 list_for_each_entry(network, &priv->ieee->network_list, list) {
7548 if (network != priv->assoc_network)
7549 ipw_best_network(priv, &match, network, 1);
7551 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7552 priv->assoc_network->stats.rssi = rssi;
7554 if (match.network == priv->assoc_network) {
7555 IPW_DEBUG_ASSOC("No better APs in this network to "
7556 "roam to.\n");
7557 priv->status &= ~STATUS_ROAMING;
7558 ipw_debug_config(priv);
7559 return;
7562 ipw_send_disassociate(priv, 1);
7563 priv->assoc_network = match.network;
7565 return;
7568 /* Second pass through ROAM process -- request association */
7569 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7570 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7571 priv->status &= ~STATUS_ROAMING;
7574 static void ipw_bg_roam(struct work_struct *work)
7576 struct ipw_priv *priv =
7577 container_of(work, struct ipw_priv, roam);
7578 mutex_lock(&priv->mutex);
7579 ipw_roam(priv);
7580 mutex_unlock(&priv->mutex);
7583 static int ipw_associate(void *data)
7585 struct ipw_priv *priv = data;
7587 struct ieee80211_network *network = NULL;
7588 struct ipw_network_match match = {
7589 .network = NULL
7591 struct ipw_supported_rates *rates;
7592 struct list_head *element;
7593 unsigned long flags;
7594 DECLARE_SSID_BUF(ssid);
7596 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7597 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7598 return 0;
7601 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7602 IPW_DEBUG_ASSOC("Not attempting association (already in "
7603 "progress)\n");
7604 return 0;
7607 if (priv->status & STATUS_DISASSOCIATING) {
7608 IPW_DEBUG_ASSOC("Not attempting association (in "
7609 "disassociating)\n ");
7610 queue_work(priv->workqueue, &priv->associate);
7611 return 0;
7614 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7615 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7616 "initialized)\n");
7617 return 0;
7620 if (!(priv->config & CFG_ASSOCIATE) &&
7621 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7622 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7623 return 0;
7626 /* Protect our use of the network_list */
7627 spin_lock_irqsave(&priv->ieee->lock, flags);
7628 list_for_each_entry(network, &priv->ieee->network_list, list)
7629 ipw_best_network(priv, &match, network, 0);
7631 network = match.network;
7632 rates = &match.rates;
7634 if (network == NULL &&
7635 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7636 priv->config & CFG_ADHOC_CREATE &&
7637 priv->config & CFG_STATIC_ESSID &&
7638 priv->config & CFG_STATIC_CHANNEL) {
7639 /* Use oldest network if the free list is empty */
7640 if (list_empty(&priv->ieee->network_free_list)) {
7641 struct ieee80211_network *oldest = NULL;
7642 struct ieee80211_network *target;
7644 list_for_each_entry(target, &priv->ieee->network_list, list) {
7645 if ((oldest == NULL) ||
7646 (target->last_scanned < oldest->last_scanned))
7647 oldest = target;
7650 /* If there are no more slots, expire the oldest */
7651 list_del(&oldest->list);
7652 target = oldest;
7653 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7654 "network list.\n",
7655 print_ssid(ssid, target->ssid,
7656 target->ssid_len),
7657 target->bssid);
7658 list_add_tail(&target->list,
7659 &priv->ieee->network_free_list);
7662 element = priv->ieee->network_free_list.next;
7663 network = list_entry(element, struct ieee80211_network, list);
7664 ipw_adhoc_create(priv, network);
7665 rates = &priv->rates;
7666 list_del(element);
7667 list_add_tail(&network->list, &priv->ieee->network_list);
7669 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7671 /* If we reached the end of the list, then we don't have any valid
7672 * matching APs */
7673 if (!network) {
7674 ipw_debug_config(priv);
7676 if (!(priv->status & STATUS_SCANNING)) {
7677 if (!(priv->config & CFG_SPEED_SCAN))
7678 queue_delayed_work(priv->workqueue,
7679 &priv->request_scan,
7680 SCAN_INTERVAL);
7681 else
7682 queue_delayed_work(priv->workqueue,
7683 &priv->request_scan, 0);
7686 return 0;
7689 ipw_associate_network(priv, network, rates, 0);
7691 return 1;
7694 static void ipw_bg_associate(struct work_struct *work)
7696 struct ipw_priv *priv =
7697 container_of(work, struct ipw_priv, associate);
7698 mutex_lock(&priv->mutex);
7699 ipw_associate(priv);
7700 mutex_unlock(&priv->mutex);
7703 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7704 struct sk_buff *skb)
7706 struct ieee80211_hdr *hdr;
7707 u16 fc;
7709 hdr = (struct ieee80211_hdr *)skb->data;
7710 fc = le16_to_cpu(hdr->frame_control);
7711 if (!(fc & IEEE80211_FCTL_PROTECTED))
7712 return;
7714 fc &= ~IEEE80211_FCTL_PROTECTED;
7715 hdr->frame_control = cpu_to_le16(fc);
7716 switch (priv->ieee->sec.level) {
7717 case SEC_LEVEL_3:
7718 /* Remove CCMP HDR */
7719 memmove(skb->data + IEEE80211_3ADDR_LEN,
7720 skb->data + IEEE80211_3ADDR_LEN + 8,
7721 skb->len - IEEE80211_3ADDR_LEN - 8);
7722 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7723 break;
7724 case SEC_LEVEL_2:
7725 break;
7726 case SEC_LEVEL_1:
7727 /* Remove IV */
7728 memmove(skb->data + IEEE80211_3ADDR_LEN,
7729 skb->data + IEEE80211_3ADDR_LEN + 4,
7730 skb->len - IEEE80211_3ADDR_LEN - 4);
7731 skb_trim(skb, skb->len - 8); /* IV + ICV */
7732 break;
7733 case SEC_LEVEL_0:
7734 break;
7735 default:
7736 printk(KERN_ERR "Unknown security level %d\n",
7737 priv->ieee->sec.level);
7738 break;
7742 static void ipw_handle_data_packet(struct ipw_priv *priv,
7743 struct ipw_rx_mem_buffer *rxb,
7744 struct ieee80211_rx_stats *stats)
7746 struct net_device *dev = priv->net_dev;
7747 struct ieee80211_hdr_4addr *hdr;
7748 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7750 /* We received data from the HW, so stop the watchdog */
7751 dev->trans_start = jiffies;
7753 /* We only process data packets if the
7754 * interface is open */
7755 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7756 skb_tailroom(rxb->skb))) {
7757 dev->stats.rx_errors++;
7758 priv->wstats.discard.misc++;
7759 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7760 return;
7761 } else if (unlikely(!netif_running(priv->net_dev))) {
7762 dev->stats.rx_dropped++;
7763 priv->wstats.discard.misc++;
7764 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7765 return;
7768 /* Advance skb->data to the start of the actual payload */
7769 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7771 /* Set the size of the skb to the size of the frame */
7772 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7774 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7776 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7777 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7778 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7779 (is_multicast_ether_addr(hdr->addr1) ?
7780 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7781 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7783 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7784 dev->stats.rx_errors++;
7785 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7786 rxb->skb = NULL;
7787 __ipw_led_activity_on(priv);
7791 #ifdef CONFIG_IPW2200_RADIOTAP
7792 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7793 struct ipw_rx_mem_buffer *rxb,
7794 struct ieee80211_rx_stats *stats)
7796 struct net_device *dev = priv->net_dev;
7797 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7798 struct ipw_rx_frame *frame = &pkt->u.frame;
7800 /* initial pull of some data */
7801 u16 received_channel = frame->received_channel;
7802 u8 antennaAndPhy = frame->antennaAndPhy;
7803 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7804 u16 pktrate = frame->rate;
7806 /* Magic struct that slots into the radiotap header -- no reason
7807 * to build this manually element by element; we can write it much
7808 * more efficiently than we can parse it. ORDER MATTERS HERE */
7809 struct ipw_rt_hdr *ipw_rt;
7811 short len = le16_to_cpu(pkt->u.frame.length);
7813 /* We received data from the HW, so stop the watchdog */
7814 dev->trans_start = jiffies;
7816 /* We only process data packets if the
7817 * interface is open */
7818 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7819 skb_tailroom(rxb->skb))) {
7820 dev->stats.rx_errors++;
7821 priv->wstats.discard.misc++;
7822 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7823 return;
7824 } else if (unlikely(!netif_running(priv->net_dev))) {
7825 dev->stats.rx_dropped++;
7826 priv->wstats.discard.misc++;
7827 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7828 return;
7831 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7832 * that now */
7833 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7834 /* FIXME: Should alloc bigger skb instead */
7835 dev->stats.rx_dropped++;
7836 priv->wstats.discard.misc++;
7837 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7838 return;
7841 /* copy the frame itself */
7842 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7843 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7845 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7847 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7848 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7849 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7851 /* Big bitfield of all the fields we provide in radiotap */
7852 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7853 (1 << IEEE80211_RADIOTAP_TSFT) |
7854 (1 << IEEE80211_RADIOTAP_FLAGS) |
7855 (1 << IEEE80211_RADIOTAP_RATE) |
7856 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7857 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7858 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7859 (1 << IEEE80211_RADIOTAP_ANTENNA));
7861 /* Zero the flags, we'll add to them as we go */
7862 ipw_rt->rt_flags = 0;
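/* Assemble the radiotap TSFT from the four parent_tsf bytes in
 * little-endian order; only these 32 bits end up in the 64-bit field. */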
7863 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7864 frame->parent_tsf[2] << 16 |
7865 frame->parent_tsf[1] << 8 |
7866 frame->parent_tsf[0]);
7868 /* Convert signal to DBM */
7869 ipw_rt->rt_dbmsignal = antsignal;
7870 ipw_rt->rt_dbmnoise = frame->noise;
7872 /* Convert the channel data and set the flags */
7873 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7874 if (received_channel > 14) { /* 802.11a */
7875 ipw_rt->rt_chbitmask =
7876 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7877 } else if (antennaAndPhy & 32) { /* 802.11b */
7878 ipw_rt->rt_chbitmask =
7879 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7880 } else { /* 802.11g */
7881 ipw_rt->rt_chbitmask =
7882 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7885 /* set the rate in multiples of 500k/s */
7886 switch (pktrate) {
7887 case IPW_TX_RATE_1MB:
7888 ipw_rt->rt_rate = 2;
7889 break;
7890 case IPW_TX_RATE_2MB:
7891 ipw_rt->rt_rate = 4;
7892 break;
7893 case IPW_TX_RATE_5MB:
7894 ipw_rt->rt_rate = 10;
7895 break;
7896 case IPW_TX_RATE_6MB:
7897 ipw_rt->rt_rate = 12;
7898 break;
7899 case IPW_TX_RATE_9MB:
7900 ipw_rt->rt_rate = 18;
7901 break;
7902 case IPW_TX_RATE_11MB:
7903 ipw_rt->rt_rate = 22;
7904 break;
7905 case IPW_TX_RATE_12MB:
7906 ipw_rt->rt_rate = 24;
7907 break;
7908 case IPW_TX_RATE_18MB:
7909 ipw_rt->rt_rate = 36;
7910 break;
7911 case IPW_TX_RATE_24MB:
7912 ipw_rt->rt_rate = 48;
7913 break;
7914 case IPW_TX_RATE_36MB:
7915 ipw_rt->rt_rate = 72;
7916 break;
7917 case IPW_TX_RATE_48MB:
7918 ipw_rt->rt_rate = 96;
7919 break;
7920 case IPW_TX_RATE_54MB:
7921 ipw_rt->rt_rate = 108;
7922 break;
7923 default:
7924 ipw_rt->rt_rate = 0;
7925 break;
7928 /* antenna number */
7929 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7931 /* set the preamble flag if we have it */
7932 if ((antennaAndPhy & 64))
7933 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7935 /* Set the size of the skb to the size of the frame */
7936 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7938 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7940 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7941 dev->stats.rx_errors++;
7942 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7943 rxb->skb = NULL;
7944 /* no LED during capture */
7947 #endif
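/* The rt_rate values filled in above follow the radiotap convention of
 * "rate in units of 500 kb/s", so 1 Mb/s maps to 2 and 54 Mb/s to 108.
 * A minimal sketch of the same IPW_TX_RATE_* mapping written as a lookup
 * table instead of a switch; the table and helper name are illustrative
 * only and are not part of the driver. */
#if 0
static const struct { u8 fw_rate; u8 rt_rate; } ipw_rt_rate_map[] = {
	{ IPW_TX_RATE_1MB,   2 }, { IPW_TX_RATE_2MB,   4 },
	{ IPW_TX_RATE_5MB,  10 }, { IPW_TX_RATE_6MB,  12 },
	{ IPW_TX_RATE_9MB,  18 }, { IPW_TX_RATE_11MB, 22 },
	{ IPW_TX_RATE_12MB, 24 }, { IPW_TX_RATE_18MB, 36 },
	{ IPW_TX_RATE_24MB, 48 }, { IPW_TX_RATE_36MB, 72 },
	{ IPW_TX_RATE_48MB, 96 }, { IPW_TX_RATE_54MB, 108 },
};

static u8 ipw_rate_to_rt(u8 fw_rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipw_rt_rate_map); i++)
		if (ipw_rt_rate_map[i].fw_rate == fw_rate)
			return ipw_rt_rate_map[i].rt_rate;
	return 0;	/* unknown rate -- same fallback as the switch above */
}
#endif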
7949 #ifdef CONFIG_IPW2200_PROMISCUOUS
7950 #define ieee80211_is_probe_response(fc) \
7951 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7952 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7954 #define ieee80211_is_management(fc) \
7955 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7957 #define ieee80211_is_control(fc) \
7958 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7960 #define ieee80211_is_data(fc) \
7961 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7963 #define ieee80211_is_assoc_request(fc) \
7964 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7966 #define ieee80211_is_reassoc_request(fc) \
7967 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7969 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7970 struct ipw_rx_mem_buffer *rxb,
7971 struct ieee80211_rx_stats *stats)
7973 struct net_device *dev = priv->prom_net_dev;
7974 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7975 struct ipw_rx_frame *frame = &pkt->u.frame;
7976 struct ipw_rt_hdr *ipw_rt;
7978 /* First cache any information we need before we overwrite
7979 * the information provided in the skb from the hardware */
7980 struct ieee80211_hdr *hdr;
7981 u16 channel = frame->received_channel;
7982 u8 phy_flags = frame->antennaAndPhy;
7983 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7984 s8 noise = frame->noise;
7985 u8 rate = frame->rate;
7986 short len = le16_to_cpu(pkt->u.frame.length);
7987 struct sk_buff *skb;
7988 int hdr_only = 0;
7989 u16 filter = priv->prom_priv->filter;
7991 /* If the filter is set to not include Rx frames then return */
7992 if (filter & IPW_PROM_NO_RX)
7993 return;
7995 /* We received data from the HW, so stop the watchdog */
7996 dev->trans_start = jiffies;
7998 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7999 dev->stats.rx_errors++;
8000 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8001 return;
8004 /* We only process data packets if the interface is open */
8005 if (unlikely(!netif_running(dev))) {
8006 dev->stats.rx_dropped++;
8007 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8008 return;
8011 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8012 * that now */
8013 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8014 /* FIXME: Should alloc bigger skb instead */
8015 dev->stats.rx_dropped++;
8016 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8017 return;
8020 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8021 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
8022 if (filter & IPW_PROM_NO_MGMT)
8023 return;
8024 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8025 hdr_only = 1;
8026 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
8027 if (filter & IPW_PROM_NO_CTL)
8028 return;
8029 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8030 hdr_only = 1;
8031 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
8032 if (filter & IPW_PROM_NO_DATA)
8033 return;
8034 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8035 hdr_only = 1;
8038 /* Copy the SKB since this is for the promiscuous side */
8039 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8040 if (skb == NULL) {
8041 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8042 return;
8045 /* copy the frame data to write after where the radiotap header goes */
8046 ipw_rt = (void *)skb->data;
8048 if (hdr_only)
8049 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
8051 memcpy(ipw_rt->payload, hdr, len);
8053 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8054 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8055 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8057 /* Set the size of the skb to the size of the frame */
8058 skb_put(skb, sizeof(*ipw_rt) + len);
8060 /* Big bitfield of all the fields we provide in radiotap */
8061 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8062 (1 << IEEE80211_RADIOTAP_TSFT) |
8063 (1 << IEEE80211_RADIOTAP_FLAGS) |
8064 (1 << IEEE80211_RADIOTAP_RATE) |
8065 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8066 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8067 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8068 (1 << IEEE80211_RADIOTAP_ANTENNA));
8070 /* Zero the flags, we'll add to them as we go */
8071 ipw_rt->rt_flags = 0;
8072 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8073 frame->parent_tsf[2] << 16 |
8074 frame->parent_tsf[1] << 8 |
8075 frame->parent_tsf[0]);
8077 /* Convert to DBM */
8078 ipw_rt->rt_dbmsignal = signal;
8079 ipw_rt->rt_dbmnoise = noise;
8081 /* Convert the channel data and set the flags */
8082 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8083 if (channel > 14) { /* 802.11a */
8084 ipw_rt->rt_chbitmask =
8085 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8086 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8087 ipw_rt->rt_chbitmask =
8088 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8089 } else { /* 802.11g */
8090 ipw_rt->rt_chbitmask =
8091 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8094 /* set the rate in multiples of 500k/s */
8095 switch (rate) {
8096 case IPW_TX_RATE_1MB:
8097 ipw_rt->rt_rate = 2;
8098 break;
8099 case IPW_TX_RATE_2MB:
8100 ipw_rt->rt_rate = 4;
8101 break;
8102 case IPW_TX_RATE_5MB:
8103 ipw_rt->rt_rate = 10;
8104 break;
8105 case IPW_TX_RATE_6MB:
8106 ipw_rt->rt_rate = 12;
8107 break;
8108 case IPW_TX_RATE_9MB:
8109 ipw_rt->rt_rate = 18;
8110 break;
8111 case IPW_TX_RATE_11MB:
8112 ipw_rt->rt_rate = 22;
8113 break;
8114 case IPW_TX_RATE_12MB:
8115 ipw_rt->rt_rate = 24;
8116 break;
8117 case IPW_TX_RATE_18MB:
8118 ipw_rt->rt_rate = 36;
8119 break;
8120 case IPW_TX_RATE_24MB:
8121 ipw_rt->rt_rate = 48;
8122 break;
8123 case IPW_TX_RATE_36MB:
8124 ipw_rt->rt_rate = 72;
8125 break;
8126 case IPW_TX_RATE_48MB:
8127 ipw_rt->rt_rate = 96;
8128 break;
8129 case IPW_TX_RATE_54MB:
8130 ipw_rt->rt_rate = 108;
8131 break;
8132 default:
8133 ipw_rt->rt_rate = 0;
8134 break;
8137 /* antenna number */
8138 ipw_rt->rt_antenna = (phy_flags & 3);
8140 /* set the preamble flag if we have it */
8141 if (phy_flags & (1 << 6))
8142 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8144 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8146 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8147 dev->stats.rx_errors++;
8148 dev_kfree_skb_any(skb);
8151 #endif
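/* Both monitor paths above assemble the radiotap TSF from parent_tsf[]
 * byte by byte, least-significant byte first (index 0 is the low byte).
 * A standalone sketch of that assembly; the helper name is illustrative
 * and not a driver function. */
#if 0
static u64 ipw_parent_tsf_to_u64(const u8 tsf[4])
{
	/* e.g. { 0x78, 0x56, 0x34, 0x12 } assembles to 0x12345678 */
	return (u64)(tsf[3] << 24 | tsf[2] << 16 | tsf[1] << 8 | tsf[0]);
}
#endif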
8153 static int is_network_packet(struct ipw_priv *priv,
8154 struct ieee80211_hdr_4addr *header)
8156 /* Filter incoming packets to determine if they are targeted toward
8157 * this network, discarding packets coming from ourselves */
8158 switch (priv->ieee->iw_mode) {
8159 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8160 /* packets from our adapter are dropped (echo) */
8161 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8162 return 0;
8164 /* {broad,multi}cast packets to our BSSID go through */
8165 if (is_multicast_ether_addr(header->addr1))
8166 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8168 /* packets to our adapter go through */
8169 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8170 ETH_ALEN);
8172 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8173 /* packets from our adapter are dropped (echo) */
8174 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8175 return 0;
8177 /* {broad,multi}cast packets to our BSS go through */
8178 if (is_multicast_ether_addr(header->addr1))
8179 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8181 /* packets to our adapter go through */
8182 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8183 ETH_ALEN);
8186 return 1;
8189 #define IPW_PACKET_RETRY_TIME HZ
8191 static int is_duplicate_packet(struct ipw_priv *priv,
8192 struct ieee80211_hdr_4addr *header)
8194 u16 sc = le16_to_cpu(header->seq_ctl);
8195 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8196 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8197 u16 *last_seq, *last_frag;
8198 unsigned long *last_time;
8200 switch (priv->ieee->iw_mode) {
8201 case IW_MODE_ADHOC:
8203 struct list_head *p;
8204 struct ipw_ibss_seq *entry = NULL;
8205 u8 *mac = header->addr2;
8206 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8208 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8209 entry =
8210 list_entry(p, struct ipw_ibss_seq, list);
8211 if (!memcmp(entry->mac, mac, ETH_ALEN))
8212 break;
8214 if (p == &priv->ibss_mac_hash[index]) {
8215 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8216 if (!entry) {
8217 IPW_ERROR
8218 ("Cannot malloc new mac entry\n");
8219 return 0;
8221 memcpy(entry->mac, mac, ETH_ALEN);
8222 entry->seq_num = seq;
8223 entry->frag_num = frag;
8224 entry->packet_time = jiffies;
8225 list_add(&entry->list,
8226 &priv->ibss_mac_hash[index]);
8227 return 0;
8229 last_seq = &entry->seq_num;
8230 last_frag = &entry->frag_num;
8231 last_time = &entry->packet_time;
8232 break;
8234 case IW_MODE_INFRA:
8235 last_seq = &priv->last_seq_num;
8236 last_frag = &priv->last_frag_num;
8237 last_time = &priv->last_packet_time;
8238 break;
8239 default:
8240 return 0;
8242 if ((*last_seq == seq) &&
8243 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8244 if (*last_frag == frag)
8245 goto drop;
8246 if (*last_frag + 1 != frag)
8247 /* out-of-order fragment */
8248 goto drop;
8249 } else
8250 *last_seq = seq;
8252 *last_frag = frag;
8253 *last_time = jiffies;
8254 return 0;
8256 drop:
8257 /* This BUG_ON() is commented out because we have observed the card
8258 * receiving duplicate packets whose FCTL_RETRY bit is not set in
8259 * IBSS mode with fragmentation enabled.
8260 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8261 return 1;
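/* The duplicate test above keeps, per transmitter, the last sequence
 * number, fragment number and arrival time.  A frame is dropped only if
 * it repeats the last sequence number within IPW_PACKET_RETRY_TIME and
 * its fragment number is either identical or does not directly follow
 * the previous one.  A condensed sketch of that decision (the helper
 * name is illustrative; the state updates are omitted): */
#if 0
static int ipw_seq_is_dup(u16 seq, u16 frag, u16 last_seq, u16 last_frag,
			  unsigned long last_time)
{
	if (seq != last_seq ||
	    !time_after(last_time + IPW_PACKET_RETRY_TIME, jiffies))
		return 0;		/* new sequence or retry window expired */
	if (frag == last_frag)
		return 1;		/* exact retransmission */
	if (frag != (u16)(last_frag + 1))
		return 1;		/* out-of-order fragment */
	return 0;			/* next fragment of the same sequence */
}
#endif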
8264 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8265 struct ipw_rx_mem_buffer *rxb,
8266 struct ieee80211_rx_stats *stats)
8268 struct sk_buff *skb = rxb->skb;
8269 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8270 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8271 (skb->data + IPW_RX_FRAME_SIZE);
8273 ieee80211_rx_mgt(priv->ieee, header, stats);
8275 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8276 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8277 IEEE80211_STYPE_PROBE_RESP) ||
8278 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8279 IEEE80211_STYPE_BEACON))) {
8280 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8281 ipw_add_station(priv, header->addr2);
8284 if (priv->config & CFG_NET_STATS) {
8285 IPW_DEBUG_HC("sending stat packet\n");
8287 /* Set the size of the skb to the size of the full
8288 * ipw header and 802.11 frame */
8289 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8290 IPW_RX_FRAME_SIZE);
8292 /* Advance past the ipw packet header to the 802.11 frame */
8293 skb_pull(skb, IPW_RX_FRAME_SIZE);
8295 /* Push the ieee80211_rx_stats before the 802.11 frame */
8296 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8298 skb->dev = priv->ieee->dev;
8300 /* Point raw at the ieee80211_stats */
8301 skb_reset_mac_header(skb);
8303 skb->pkt_type = PACKET_OTHERHOST;
8304 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8305 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8306 netif_rx(skb);
8307 rxb->skb = NULL;
8312 * Main entry function for receiving a packet with 802.11 headers. This
8313 * should be called whenever the FW has notified us that there is a new
8314 * skb in the receive queue.
8316 static void ipw_rx(struct ipw_priv *priv)
8318 struct ipw_rx_mem_buffer *rxb;
8319 struct ipw_rx_packet *pkt;
8320 struct ieee80211_hdr_4addr *header;
8321 u32 r, w, i;
8322 u8 network_packet;
8323 u8 fill_rx = 0;
8325 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8326 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8327 i = priv->rxq->read;
8329 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8330 fill_rx = 1;
8332 while (i != r) {
8333 rxb = priv->rxq->queue[i];
8334 if (unlikely(rxb == NULL)) {
8335 printk(KERN_CRIT "Queue not allocated!\n");
8336 break;
8338 priv->rxq->queue[i] = NULL;
8340 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8341 IPW_RX_BUF_SIZE,
8342 PCI_DMA_FROMDEVICE);
8344 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8345 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8346 pkt->header.message_type,
8347 pkt->header.rx_seq_num, pkt->header.control_bits);
8349 switch (pkt->header.message_type) {
8350 case RX_FRAME_TYPE: /* 802.11 frame */ {
8351 struct ieee80211_rx_stats stats = {
8352 .rssi = pkt->u.frame.rssi_dbm -
8353 IPW_RSSI_TO_DBM,
8354 .signal =
8355 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8356 IPW_RSSI_TO_DBM + 0x100,
8357 .noise =
8358 le16_to_cpu(pkt->u.frame.noise),
8359 .rate = pkt->u.frame.rate,
8360 .mac_time = jiffies,
8361 .received_channel =
8362 pkt->u.frame.received_channel,
8363 .freq =
8364 (pkt->u.frame.
8365 control & (1 << 0)) ?
8366 IEEE80211_24GHZ_BAND :
8367 IEEE80211_52GHZ_BAND,
8368 .len = le16_to_cpu(pkt->u.frame.length),
8371 if (stats.rssi != 0)
8372 stats.mask |= IEEE80211_STATMASK_RSSI;
8373 if (stats.signal != 0)
8374 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8375 if (stats.noise != 0)
8376 stats.mask |= IEEE80211_STATMASK_NOISE;
8377 if (stats.rate != 0)
8378 stats.mask |= IEEE80211_STATMASK_RATE;
8380 priv->rx_packets++;
8382 #ifdef CONFIG_IPW2200_PROMISCUOUS
8383 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8384 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8385 #endif
8387 #ifdef CONFIG_IPW2200_MONITOR
8388 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8389 #ifdef CONFIG_IPW2200_RADIOTAP
8391 ipw_handle_data_packet_monitor(priv,
8392 rxb,
8393 &stats);
8394 #else
8395 ipw_handle_data_packet(priv, rxb,
8396 &stats);
8397 #endif
8398 break;
8400 #endif
8402 header =
8403 (struct ieee80211_hdr_4addr *)(rxb->skb->
8404 data +
8405 IPW_RX_FRAME_SIZE);
8406 /* TODO: Check Ad-Hoc dest/source and make sure
8407 * that we are actually parsing these packets
8408 * correctly -- we should probably use the
8409 * frame control of the packet and disregard
8410 * the current iw_mode */
8412 network_packet =
8413 is_network_packet(priv, header);
8414 if (network_packet && priv->assoc_network) {
8415 priv->assoc_network->stats.rssi =
8416 stats.rssi;
8417 priv->exp_avg_rssi =
8418 exponential_average(priv->exp_avg_rssi,
8419 stats.rssi, DEPTH_RSSI);
8422 IPW_DEBUG_RX("Frame: len=%u\n",
8423 le16_to_cpu(pkt->u.frame.length));
8425 if (le16_to_cpu(pkt->u.frame.length) <
8426 ieee80211_get_hdrlen(le16_to_cpu(
8427 header->frame_ctl))) {
8428 IPW_DEBUG_DROP
8429 ("Received packet is too small. "
8430 "Dropping.\n");
8431 priv->net_dev->stats.rx_errors++;
8432 priv->wstats.discard.misc++;
8433 break;
8436 switch (WLAN_FC_GET_TYPE
8437 (le16_to_cpu(header->frame_ctl))) {
8439 case IEEE80211_FTYPE_MGMT:
8440 ipw_handle_mgmt_packet(priv, rxb,
8441 &stats);
8442 break;
8444 case IEEE80211_FTYPE_CTL:
8445 break;
8447 case IEEE80211_FTYPE_DATA:
8448 if (unlikely(!network_packet ||
8449 is_duplicate_packet(priv,
8450 header)))
8452 IPW_DEBUG_DROP("Dropping: "
8453 "%pM, "
8454 "%pM, "
8455 "%pM\n",
8456 header->addr1,
8457 header->addr2,
8458 header->addr3);
8459 break;
8462 ipw_handle_data_packet(priv, rxb,
8463 &stats);
8465 break;
8467 break;
8470 case RX_HOST_NOTIFICATION_TYPE:{
8471 IPW_DEBUG_RX
8472 ("Notification: subtype=%02X flags=%02X size=%d\n",
8473 pkt->u.notification.subtype,
8474 pkt->u.notification.flags,
8475 le16_to_cpu(pkt->u.notification.size));
8476 ipw_rx_notification(priv, &pkt->u.notification);
8477 break;
8480 default:
8481 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8482 pkt->header.message_type);
8483 break;
8486 /* For now we just don't re-use anything. We can tweak this
8487 * later to try and re-use notification packets and SKBs that
8488 * fail to Rx correctly */
8489 if (rxb->skb != NULL) {
8490 dev_kfree_skb_any(rxb->skb);
8491 rxb->skb = NULL;
8494 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8495 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8496 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8498 i = (i + 1) % RX_QUEUE_SIZE;
8500 /* If there are a lot of unused frames, restock the Rx queue
8501 * so the ucode won't assert */
8502 if (fill_rx) {
8503 priv->rxq->read = i;
8504 ipw_rx_queue_replenish(priv);
8508 /* Backtrack one entry */
8509 priv->rxq->read = i;
8510 ipw_rx_queue_restock(priv);
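/* ipw_rx() walks the receive ring from the driver's read index up to the
 * index reported by the IPW_RX_READ_INDEX register, wrapping modulo
 * RX_QUEUE_SIZE, and hands each filled buffer to the handlers above.
 * A stripped-down sketch of just that index walk (illustrative only;
 * DMA syncing, packet dispatch and restocking are omitted): */
#if 0
static void ipw_rx_walk_sketch(struct ipw_priv *priv)
{
	u32 r = ipw_read32(priv, IPW_RX_READ_INDEX);
	u32 i = priv->rxq->read;

	while (i != r) {
		/* ... process priv->rxq->queue[i] here ... */
		i = (i + 1) % RX_QUEUE_SIZE;
	}
	priv->rxq->read = i;	/* remember where we stopped */
}
#endif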
8513 #define DEFAULT_RTS_THRESHOLD 2304U
8514 #define MIN_RTS_THRESHOLD 1U
8515 #define MAX_RTS_THRESHOLD 2304U
8516 #define DEFAULT_BEACON_INTERVAL 100U
8517 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8518 #define DEFAULT_LONG_RETRY_LIMIT 4U
8521 * ipw_sw_reset
8522 * @option: options to control different reset behaviour
8523 * 0 = reset everything except the 'disable' module_param
8524 * 1 = reset everything and print out driver info (for probe only)
8525 * 2 = reset everything
8527 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8529 int band, modulation;
8530 int old_mode = priv->ieee->iw_mode;
8532 /* Initialize module parameter values here */
8533 priv->config = 0;
8535 /* We default to disabling the LED code as right now it causes
8536 * too many systems to lock up... */
8537 if (!led)
8538 priv->config |= CFG_NO_LED;
8540 if (associate)
8541 priv->config |= CFG_ASSOCIATE;
8542 else
8543 IPW_DEBUG_INFO("Auto associate disabled.\n");
8545 if (auto_create)
8546 priv->config |= CFG_ADHOC_CREATE;
8547 else
8548 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8550 priv->config &= ~CFG_STATIC_ESSID;
8551 priv->essid_len = 0;
8552 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8554 if (disable && option) {
8555 priv->status |= STATUS_RF_KILL_SW;
8556 IPW_DEBUG_INFO("Radio disabled.\n");
8559 if (channel != 0) {
8560 priv->config |= CFG_STATIC_CHANNEL;
8561 priv->channel = channel;
8562 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8563 /* TODO: Validate that provided channel is in range */
8565 #ifdef CONFIG_IPW2200_QOS
8566 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8567 burst_duration_CCK, burst_duration_OFDM);
8568 #endif /* CONFIG_IPW2200_QOS */
8570 switch (mode) {
8571 case 1:
8572 priv->ieee->iw_mode = IW_MODE_ADHOC;
8573 priv->net_dev->type = ARPHRD_ETHER;
8575 break;
8576 #ifdef CONFIG_IPW2200_MONITOR
8577 case 2:
8578 priv->ieee->iw_mode = IW_MODE_MONITOR;
8579 #ifdef CONFIG_IPW2200_RADIOTAP
8580 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8581 #else
8582 priv->net_dev->type = ARPHRD_IEEE80211;
8583 #endif
8584 break;
8585 #endif
8586 default:
8587 case 0:
8588 priv->net_dev->type = ARPHRD_ETHER;
8589 priv->ieee->iw_mode = IW_MODE_INFRA;
8590 break;
8593 if (hwcrypto) {
8594 priv->ieee->host_encrypt = 0;
8595 priv->ieee->host_encrypt_msdu = 0;
8596 priv->ieee->host_decrypt = 0;
8597 priv->ieee->host_mc_decrypt = 0;
8599 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8601 /* IPW2200/2915 is able to do hardware fragmentation. */
8602 priv->ieee->host_open_frag = 0;
8604 if ((priv->pci_dev->device == 0x4223) ||
8605 (priv->pci_dev->device == 0x4224)) {
8606 if (option == 1)
8607 printk(KERN_INFO DRV_NAME
8608 ": Detected Intel PRO/Wireless 2915ABG Network "
8609 "Connection\n");
8610 priv->ieee->abg_true = 1;
8611 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8612 modulation = IEEE80211_OFDM_MODULATION |
8613 IEEE80211_CCK_MODULATION;
8614 priv->adapter = IPW_2915ABG;
8615 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8616 } else {
8617 if (option == 1)
8618 printk(KERN_INFO DRV_NAME
8619 ": Detected Intel PRO/Wireless 2200BG Network "
8620 "Connection\n");
8622 priv->ieee->abg_true = 0;
8623 band = IEEE80211_24GHZ_BAND;
8624 modulation = IEEE80211_OFDM_MODULATION |
8625 IEEE80211_CCK_MODULATION;
8626 priv->adapter = IPW_2200BG;
8627 priv->ieee->mode = IEEE_G | IEEE_B;
8630 priv->ieee->freq_band = band;
8631 priv->ieee->modulation = modulation;
8633 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8635 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8636 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8638 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8639 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8640 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8642 /* If power management is turned on, default to AC mode */
8643 priv->power_mode = IPW_POWER_AC;
8644 priv->tx_power = IPW_TX_POWER_DEFAULT;
8646 return old_mode == priv->ieee->iw_mode;
8650 * This file defines the Wireless Extension handlers. It does not
8651 * define any methods of hardware manipulation and relies on the
8652 * functions defined in ipw_main to provide the HW interaction.
8654 * The exception to this is the use of the ipw_get_ordinal()
8655 * function used to poll the hardware vs. making unnecessary calls.
8659 static int ipw_wx_get_name(struct net_device *dev,
8660 struct iw_request_info *info,
8661 union iwreq_data *wrqu, char *extra)
8663 struct ipw_priv *priv = ieee80211_priv(dev);
8664 mutex_lock(&priv->mutex);
8665 if (priv->status & STATUS_RF_KILL_MASK)
8666 strcpy(wrqu->name, "radio off");
8667 else if (!(priv->status & STATUS_ASSOCIATED))
8668 strcpy(wrqu->name, "unassociated");
8669 else
8670 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8671 ipw_modes[priv->assoc_request.ieee_mode]);
8672 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8673 mutex_unlock(&priv->mutex);
8674 return 0;
8677 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8679 if (channel == 0) {
8680 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8681 priv->config &= ~CFG_STATIC_CHANNEL;
8682 IPW_DEBUG_ASSOC("Attempting to associate with new "
8683 "parameters.\n");
8684 ipw_associate(priv);
8685 return 0;
8688 priv->config |= CFG_STATIC_CHANNEL;
8690 if (priv->channel == channel) {
8691 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8692 channel);
8693 return 0;
8696 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8697 priv->channel = channel;
8699 #ifdef CONFIG_IPW2200_MONITOR
8700 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8701 int i;
8702 if (priv->status & STATUS_SCANNING) {
8703 IPW_DEBUG_SCAN("Scan abort triggered due to "
8704 "channel change.\n");
8705 ipw_abort_scan(priv);
8708 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8709 udelay(10);
8711 if (priv->status & STATUS_SCANNING)
8712 IPW_DEBUG_SCAN("Still scanning...\n");
8713 else
8714 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8715 1000 - i);
8717 return 0;
8719 #endif /* CONFIG_IPW2200_MONITOR */
8721 /* Network configuration changed -- force [re]association */
8722 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8723 if (!ipw_disassociate(priv))
8724 ipw_associate(priv);
8726 return 0;
8729 static int ipw_wx_set_freq(struct net_device *dev,
8730 struct iw_request_info *info,
8731 union iwreq_data *wrqu, char *extra)
8733 struct ipw_priv *priv = ieee80211_priv(dev);
8734 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8735 struct iw_freq *fwrq = &wrqu->freq;
8736 int ret = 0, i;
8737 u8 channel, flags;
8738 int band;
8740 if (fwrq->m == 0) {
8741 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8742 mutex_lock(&priv->mutex);
8743 ret = ipw_set_channel(priv, 0);
8744 mutex_unlock(&priv->mutex);
8745 return ret;
8747 /* if setting by frequency, convert to channel */
8748 if (fwrq->e == 1) {
8749 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8750 if (channel == 0)
8751 return -EINVAL;
8752 } else
8753 channel = fwrq->m;
8755 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8756 return -EINVAL;
8758 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8759 i = ieee80211_channel_to_index(priv->ieee, channel);
8760 if (i == -1)
8761 return -EINVAL;
8763 flags = (band == IEEE80211_24GHZ_BAND) ?
8764 geo->bg[i].flags : geo->a[i].flags;
8765 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8766 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8767 return -EINVAL;
8771 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8772 mutex_lock(&priv->mutex);
8773 ret = ipw_set_channel(priv, channel);
8774 mutex_unlock(&priv->mutex);
8775 return ret;
8778 static int ipw_wx_get_freq(struct net_device *dev,
8779 struct iw_request_info *info,
8780 union iwreq_data *wrqu, char *extra)
8782 struct ipw_priv *priv = ieee80211_priv(dev);
8784 wrqu->freq.e = 0;
8786 /* If we are associated, trying to associate, or have a statically
8787 * configured CHANNEL then return that; otherwise return ANY */
8788 mutex_lock(&priv->mutex);
8789 if (priv->config & CFG_STATIC_CHANNEL ||
8790 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8791 int i;
8793 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8794 BUG_ON(i == -1);
8795 wrqu->freq.e = 1;
8797 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8798 case IEEE80211_52GHZ_BAND:
8799 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8800 break;
8802 case IEEE80211_24GHZ_BAND:
8803 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8804 break;
8806 default:
8807 BUG();
8809 } else
8810 wrqu->freq.m = 0;
8812 mutex_unlock(&priv->mutex);
8813 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8814 return 0;
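/* Wireless Extensions report frequency as m * 10^e Hz.  Assuming the geo
 * tables hold the channel centre frequency in MHz (their usual convention),
 * the scaling above works out as: channel 6 -> geo freq 2437, so
 * m = 2437 * 100000 = 243700000 with e = 1, i.e. 2.437e9 Hz = 2.437 GHz. */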
8817 static int ipw_wx_set_mode(struct net_device *dev,
8818 struct iw_request_info *info,
8819 union iwreq_data *wrqu, char *extra)
8821 struct ipw_priv *priv = ieee80211_priv(dev);
8822 int err = 0;
8824 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8826 switch (wrqu->mode) {
8827 #ifdef CONFIG_IPW2200_MONITOR
8828 case IW_MODE_MONITOR:
8829 #endif
8830 case IW_MODE_ADHOC:
8831 case IW_MODE_INFRA:
8832 break;
8833 case IW_MODE_AUTO:
8834 wrqu->mode = IW_MODE_INFRA;
8835 break;
8836 default:
8837 return -EINVAL;
8839 if (wrqu->mode == priv->ieee->iw_mode)
8840 return 0;
8842 mutex_lock(&priv->mutex);
8844 ipw_sw_reset(priv, 0);
8846 #ifdef CONFIG_IPW2200_MONITOR
8847 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8848 priv->net_dev->type = ARPHRD_ETHER;
8850 if (wrqu->mode == IW_MODE_MONITOR)
8851 #ifdef CONFIG_IPW2200_RADIOTAP
8852 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8853 #else
8854 priv->net_dev->type = ARPHRD_IEEE80211;
8855 #endif
8856 #endif /* CONFIG_IPW2200_MONITOR */
8858 /* Free the existing firmware and reset the fw_loaded
8859 * flag so ipw_load() will bring in the new firmware */
8860 free_firmware();
8862 priv->ieee->iw_mode = wrqu->mode;
8864 queue_work(priv->workqueue, &priv->adapter_restart);
8865 mutex_unlock(&priv->mutex);
8866 return err;
8869 static int ipw_wx_get_mode(struct net_device *dev,
8870 struct iw_request_info *info,
8871 union iwreq_data *wrqu, char *extra)
8873 struct ipw_priv *priv = ieee80211_priv(dev);
8874 mutex_lock(&priv->mutex);
8875 wrqu->mode = priv->ieee->iw_mode;
8876 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8877 mutex_unlock(&priv->mutex);
8878 return 0;
8881 /* Values are in microseconds */
8882 static const s32 timeout_duration[] = {
8883 350000,
8884 250000,
8885 75000,
8886 37000,
8887 25000,
8890 static const s32 period_duration[] = {
8891 400000,
8892 700000,
8893 1000000,
8894 1000000,
8895 1000000
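/* Both tables are indexed by IPW_POWER_LEVEL(priv->power_mode) - 1 and are
 * reported in milliseconds (value / 1000) by ipw_wx_get_powermode() below:
 * e.g. level 1 -> 350 ms timeout, 400 ms period; level 3 -> 75 ms timeout,
 * 1000 ms period. */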
8898 static int ipw_wx_get_range(struct net_device *dev,
8899 struct iw_request_info *info,
8900 union iwreq_data *wrqu, char *extra)
8902 struct ipw_priv *priv = ieee80211_priv(dev);
8903 struct iw_range *range = (struct iw_range *)extra;
8904 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8905 int i = 0, j;
8907 wrqu->data.length = sizeof(*range);
8908 memset(range, 0, sizeof(*range));
8910 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8911 range->throughput = 27 * 1000 * 1000;
8913 range->max_qual.qual = 100;
8914 /* TODO: Find real max RSSI and stick here */
8915 range->max_qual.level = 0;
8916 range->max_qual.noise = 0;
8917 range->max_qual.updated = 7; /* Updated all three */
8919 range->avg_qual.qual = 70;
8920 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8921 range->avg_qual.level = 0; /* FIXME to real average level */
8922 range->avg_qual.noise = 0;
8923 range->avg_qual.updated = 7; /* Updated all three */
8924 mutex_lock(&priv->mutex);
8925 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8927 for (i = 0; i < range->num_bitrates; i++)
8928 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8929 500000;
8931 range->max_rts = DEFAULT_RTS_THRESHOLD;
8932 range->min_frag = MIN_FRAG_THRESHOLD;
8933 range->max_frag = MAX_FRAG_THRESHOLD;
8935 range->encoding_size[0] = 5;
8936 range->encoding_size[1] = 13;
8937 range->num_encoding_sizes = 2;
8938 range->max_encoding_tokens = WEP_KEYS;
8940 /* Set the Wireless Extension versions */
8941 range->we_version_compiled = WIRELESS_EXT;
8942 range->we_version_source = 18;
8944 i = 0;
8945 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8946 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8947 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8948 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8949 continue;
8951 range->freq[i].i = geo->bg[j].channel;
8952 range->freq[i].m = geo->bg[j].freq * 100000;
8953 range->freq[i].e = 1;
8954 i++;
8958 if (priv->ieee->mode & IEEE_A) {
8959 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8960 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8961 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8962 continue;
8964 range->freq[i].i = geo->a[j].channel;
8965 range->freq[i].m = geo->a[j].freq * 100000;
8966 range->freq[i].e = 1;
8967 i++;
8971 range->num_channels = i;
8972 range->num_frequency = i;
8974 mutex_unlock(&priv->mutex);
8976 /* Event capability (kernel + driver) */
8977 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8978 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8979 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8980 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8981 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8983 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8984 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8986 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8988 IPW_DEBUG_WX("GET Range\n");
8989 return 0;
8992 static int ipw_wx_set_wap(struct net_device *dev,
8993 struct iw_request_info *info,
8994 union iwreq_data *wrqu, char *extra)
8996 struct ipw_priv *priv = ieee80211_priv(dev);
8998 static const unsigned char any[] = {
8999 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9001 static const unsigned char off[] = {
9002 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9005 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9006 return -EINVAL;
9007 mutex_lock(&priv->mutex);
9008 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9009 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9010 /* we disable mandatory BSSID association */
9011 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9012 priv->config &= ~CFG_STATIC_BSSID;
9013 IPW_DEBUG_ASSOC("Attempting to associate with new "
9014 "parameters.\n");
9015 ipw_associate(priv);
9016 mutex_unlock(&priv->mutex);
9017 return 0;
9020 priv->config |= CFG_STATIC_BSSID;
9021 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9022 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9023 mutex_unlock(&priv->mutex);
9024 return 0;
9027 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9028 wrqu->ap_addr.sa_data);
9030 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9032 /* Network configuration changed -- force [re]association */
9033 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9034 if (!ipw_disassociate(priv))
9035 ipw_associate(priv);
9037 mutex_unlock(&priv->mutex);
9038 return 0;
9041 static int ipw_wx_get_wap(struct net_device *dev,
9042 struct iw_request_info *info,
9043 union iwreq_data *wrqu, char *extra)
9045 struct ipw_priv *priv = ieee80211_priv(dev);
9047 /* If we are associated, trying to associate, or have a statically
9048 * configured BSSID then return that; otherwise return ANY */
9049 mutex_lock(&priv->mutex);
9050 if (priv->config & CFG_STATIC_BSSID ||
9051 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9052 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9053 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9054 } else
9055 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9057 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9058 wrqu->ap_addr.sa_data);
9059 mutex_unlock(&priv->mutex);
9060 return 0;
9063 static int ipw_wx_set_essid(struct net_device *dev,
9064 struct iw_request_info *info,
9065 union iwreq_data *wrqu, char *extra)
9067 struct ipw_priv *priv = ieee80211_priv(dev);
9068 int length;
9069 DECLARE_SSID_BUF(ssid);
9071 mutex_lock(&priv->mutex);
9073 if (!wrqu->essid.flags)
9075 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9076 ipw_disassociate(priv);
9077 priv->config &= ~CFG_STATIC_ESSID;
9078 ipw_associate(priv);
9079 mutex_unlock(&priv->mutex);
9080 return 0;
9083 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9085 priv->config |= CFG_STATIC_ESSID;
9087 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9088 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9089 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9090 mutex_unlock(&priv->mutex);
9091 return 0;
9094 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9095 print_ssid(ssid, extra, length), length);
9097 priv->essid_len = length;
9098 memcpy(priv->essid, extra, priv->essid_len);
9100 /* Network configuration changed -- force [re]association */
9101 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9102 if (!ipw_disassociate(priv))
9103 ipw_associate(priv);
9105 mutex_unlock(&priv->mutex);
9106 return 0;
9109 static int ipw_wx_get_essid(struct net_device *dev,
9110 struct iw_request_info *info,
9111 union iwreq_data *wrqu, char *extra)
9113 struct ipw_priv *priv = ieee80211_priv(dev);
9114 DECLARE_SSID_BUF(ssid);
9116 /* If we are associated, trying to associate, or have a statically
9117 * configured ESSID then return that; otherwise return ANY */
9118 mutex_lock(&priv->mutex);
9119 if (priv->config & CFG_STATIC_ESSID ||
9120 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9121 IPW_DEBUG_WX("Getting essid: '%s'\n",
9122 print_ssid(ssid, priv->essid, priv->essid_len));
9123 memcpy(extra, priv->essid, priv->essid_len);
9124 wrqu->essid.length = priv->essid_len;
9125 wrqu->essid.flags = 1; /* active */
9126 } else {
9127 IPW_DEBUG_WX("Getting essid: ANY\n");
9128 wrqu->essid.length = 0;
9129 wrqu->essid.flags = 0; /* active */
9131 mutex_unlock(&priv->mutex);
9132 return 0;
9135 static int ipw_wx_set_nick(struct net_device *dev,
9136 struct iw_request_info *info,
9137 union iwreq_data *wrqu, char *extra)
9139 struct ipw_priv *priv = ieee80211_priv(dev);
9141 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9142 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9143 return -E2BIG;
9144 mutex_lock(&priv->mutex);
9145 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9146 memset(priv->nick, 0, sizeof(priv->nick));
9147 memcpy(priv->nick, extra, wrqu->data.length);
9148 IPW_DEBUG_TRACE("<<\n");
9149 mutex_unlock(&priv->mutex);
9150 return 0;
9154 static int ipw_wx_get_nick(struct net_device *dev,
9155 struct iw_request_info *info,
9156 union iwreq_data *wrqu, char *extra)
9158 struct ipw_priv *priv = ieee80211_priv(dev);
9159 IPW_DEBUG_WX("Getting nick\n");
9160 mutex_lock(&priv->mutex);
9161 wrqu->data.length = strlen(priv->nick);
9162 memcpy(extra, priv->nick, wrqu->data.length);
9163 wrqu->data.flags = 1; /* active */
9164 mutex_unlock(&priv->mutex);
9165 return 0;
9168 static int ipw_wx_set_sens(struct net_device *dev,
9169 struct iw_request_info *info,
9170 union iwreq_data *wrqu, char *extra)
9172 struct ipw_priv *priv = ieee80211_priv(dev);
9173 int err = 0;
9175 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9176 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9177 mutex_lock(&priv->mutex);
9179 if (wrqu->sens.fixed == 0)
9181 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9182 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9183 goto out;
9185 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9186 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9187 err = -EINVAL;
9188 goto out;
9191 priv->roaming_threshold = wrqu->sens.value;
9192 priv->disassociate_threshold = 3*wrqu->sens.value;
9193 out:
9194 mutex_unlock(&priv->mutex);
9195 return err;
9198 static int ipw_wx_get_sens(struct net_device *dev,
9199 struct iw_request_info *info,
9200 union iwreq_data *wrqu, char *extra)
9202 struct ipw_priv *priv = ieee80211_priv(dev);
9203 mutex_lock(&priv->mutex);
9204 wrqu->sens.fixed = 1;
9205 wrqu->sens.value = priv->roaming_threshold;
9206 mutex_unlock(&priv->mutex);
9208 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9209 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9211 return 0;
9214 static int ipw_wx_set_rate(struct net_device *dev,
9215 struct iw_request_info *info,
9216 union iwreq_data *wrqu, char *extra)
9218 /* TODO: We should use semaphores or locks for access to priv */
9219 struct ipw_priv *priv = ieee80211_priv(dev);
9220 u32 target_rate = wrqu->bitrate.value;
9221 u32 fixed, mask;
9223 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9224 /* value = X, fixed = 1 means only rate X */
9225 /* value = X, fixed = 0 means all rates lower than or equal to X */
9227 if (target_rate == -1) {
9228 fixed = 0;
9229 mask = IEEE80211_DEFAULT_RATES_MASK;
9230 /* Now we should reassociate */
9231 goto apply;
9234 mask = 0;
9235 fixed = wrqu->bitrate.fixed;
9237 if (target_rate == 1000000 || !fixed)
9238 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9239 if (target_rate == 1000000)
9240 goto apply;
9242 if (target_rate == 2000000 || !fixed)
9243 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9244 if (target_rate == 2000000)
9245 goto apply;
9247 if (target_rate == 5500000 || !fixed)
9248 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9249 if (target_rate == 5500000)
9250 goto apply;
9252 if (target_rate == 6000000 || !fixed)
9253 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9254 if (target_rate == 6000000)
9255 goto apply;
9257 if (target_rate == 9000000 || !fixed)
9258 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9259 if (target_rate == 9000000)
9260 goto apply;
9262 if (target_rate == 11000000 || !fixed)
9263 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9264 if (target_rate == 11000000)
9265 goto apply;
9267 if (target_rate == 12000000 || !fixed)
9268 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9269 if (target_rate == 12000000)
9270 goto apply;
9272 if (target_rate == 18000000 || !fixed)
9273 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9274 if (target_rate == 18000000)
9275 goto apply;
9277 if (target_rate == 24000000 || !fixed)
9278 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9279 if (target_rate == 24000000)
9280 goto apply;
9282 if (target_rate == 36000000 || !fixed)
9283 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9284 if (target_rate == 36000000)
9285 goto apply;
9287 if (target_rate == 48000000 || !fixed)
9288 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9289 if (target_rate == 48000000)
9290 goto apply;
9292 if (target_rate == 54000000 || !fixed)
9293 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9294 if (target_rate == 54000000)
9295 goto apply;
9297 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9298 return -EINVAL;
9300 apply:
9301 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9302 mask, fixed ? "fixed" : "sub-rates");
9303 mutex_lock(&priv->mutex);
9304 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9305 priv->config &= ~CFG_FIXED_RATE;
9306 ipw_set_fixed_rate(priv, priv->ieee->mode);
9307 } else
9308 priv->config |= CFG_FIXED_RATE;
9310 if (priv->rates_mask == mask) {
9311 IPW_DEBUG_WX("Mask set to current mask.\n");
9312 mutex_unlock(&priv->mutex);
9313 return 0;
9316 priv->rates_mask = mask;
9318 /* Network configuration changed -- force [re]association */
9319 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9320 if (!ipw_disassociate(priv))
9321 ipw_associate(priv);
9323 mutex_unlock(&priv->mutex);
9324 return 0;
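/* The cascade above builds the bitrate mask from the (value, fixed) pair
 * passed in by SIOCSIWRATE: fixed == 1 keeps only the requested rate,
 * fixed == 0 also includes every lower rate, and value == -1 restores the
 * full default mask.  A table-driven sketch of the same construction; the
 * table and helper are illustrative only and not part of the driver. */
#if 0
static const struct { u32 bps; u32 mask_bit; } ipw_wx_rate_table[] = {
	{  1000000, IEEE80211_CCK_RATE_1MB_MASK   },
	{  2000000, IEEE80211_CCK_RATE_2MB_MASK   },
	{  5500000, IEEE80211_CCK_RATE_5MB_MASK   },
	{  6000000, IEEE80211_OFDM_RATE_6MB_MASK  },
	{  9000000, IEEE80211_OFDM_RATE_9MB_MASK  },
	{ 11000000, IEEE80211_CCK_RATE_11MB_MASK  },
	{ 12000000, IEEE80211_OFDM_RATE_12MB_MASK },
	{ 18000000, IEEE80211_OFDM_RATE_18MB_MASK },
	{ 24000000, IEEE80211_OFDM_RATE_24MB_MASK },
	{ 36000000, IEEE80211_OFDM_RATE_36MB_MASK },
	{ 48000000, IEEE80211_OFDM_RATE_48MB_MASK },
	{ 54000000, IEEE80211_OFDM_RATE_54MB_MASK },
};

static int ipw_build_rate_mask(u32 target, int fixed, u32 *mask)
{
	int i;

	*mask = 0;
	for (i = 0; i < ARRAY_SIZE(ipw_wx_rate_table); i++) {
		if (!fixed || ipw_wx_rate_table[i].bps == target)
			*mask |= ipw_wx_rate_table[i].mask_bit;
		if (ipw_wx_rate_table[i].bps == target)
			return 0;	/* stop once the requested rate is reached */
	}
	return -EINVAL;			/* requested rate not in the table */
}
#endif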
9327 static int ipw_wx_get_rate(struct net_device *dev,
9328 struct iw_request_info *info,
9329 union iwreq_data *wrqu, char *extra)
9331 struct ipw_priv *priv = ieee80211_priv(dev);
9332 mutex_lock(&priv->mutex);
9333 wrqu->bitrate.value = priv->last_rate;
9334 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9335 mutex_unlock(&priv->mutex);
9336 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9337 return 0;
9340 static int ipw_wx_set_rts(struct net_device *dev,
9341 struct iw_request_info *info,
9342 union iwreq_data *wrqu, char *extra)
9344 struct ipw_priv *priv = ieee80211_priv(dev);
9345 mutex_lock(&priv->mutex);
9346 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9347 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9348 else {
9349 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9350 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9351 mutex_unlock(&priv->mutex);
9352 return -EINVAL;
9354 priv->rts_threshold = wrqu->rts.value;
9357 ipw_send_rts_threshold(priv, priv->rts_threshold);
9358 mutex_unlock(&priv->mutex);
9359 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9360 return 0;
9363 static int ipw_wx_get_rts(struct net_device *dev,
9364 struct iw_request_info *info,
9365 union iwreq_data *wrqu, char *extra)
9367 struct ipw_priv *priv = ieee80211_priv(dev);
9368 mutex_lock(&priv->mutex);
9369 wrqu->rts.value = priv->rts_threshold;
9370 wrqu->rts.fixed = 0; /* no auto select */
9371 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9372 mutex_unlock(&priv->mutex);
9373 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9374 return 0;
9377 static int ipw_wx_set_txpow(struct net_device *dev,
9378 struct iw_request_info *info,
9379 union iwreq_data *wrqu, char *extra)
9381 struct ipw_priv *priv = ieee80211_priv(dev);
9382 int err = 0;
9384 mutex_lock(&priv->mutex);
9385 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9386 err = -EINPROGRESS;
9387 goto out;
9390 if (!wrqu->power.fixed)
9391 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9393 if (wrqu->power.flags != IW_TXPOW_DBM) {
9394 err = -EINVAL;
9395 goto out;
9398 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9399 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9400 err = -EINVAL;
9401 goto out;
9404 priv->tx_power = wrqu->power.value;
9405 err = ipw_set_tx_power(priv);
9406 out:
9407 mutex_unlock(&priv->mutex);
9408 return err;
9411 static int ipw_wx_get_txpow(struct net_device *dev,
9412 struct iw_request_info *info,
9413 union iwreq_data *wrqu, char *extra)
9415 struct ipw_priv *priv = ieee80211_priv(dev);
9416 mutex_lock(&priv->mutex);
9417 wrqu->power.value = priv->tx_power;
9418 wrqu->power.fixed = 1;
9419 wrqu->power.flags = IW_TXPOW_DBM;
9420 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9421 mutex_unlock(&priv->mutex);
9423 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9424 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9426 return 0;
9429 static int ipw_wx_set_frag(struct net_device *dev,
9430 struct iw_request_info *info,
9431 union iwreq_data *wrqu, char *extra)
9433 struct ipw_priv *priv = ieee80211_priv(dev);
9434 mutex_lock(&priv->mutex);
9435 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9436 priv->ieee->fts = DEFAULT_FTS;
9437 else {
9438 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9439 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9440 mutex_unlock(&priv->mutex);
9441 return -EINVAL;
9444 priv->ieee->fts = wrqu->frag.value & ~0x1;
9447 ipw_send_frag_threshold(priv, wrqu->frag.value);
9448 mutex_unlock(&priv->mutex);
9449 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9450 return 0;
9453 static int ipw_wx_get_frag(struct net_device *dev,
9454 struct iw_request_info *info,
9455 union iwreq_data *wrqu, char *extra)
9457 struct ipw_priv *priv = ieee80211_priv(dev);
9458 mutex_lock(&priv->mutex);
9459 wrqu->frag.value = priv->ieee->fts;
9460 wrqu->frag.fixed = 0; /* no auto select */
9461 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9462 mutex_unlock(&priv->mutex);
9463 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9465 return 0;
9468 static int ipw_wx_set_retry(struct net_device *dev,
9469 struct iw_request_info *info,
9470 union iwreq_data *wrqu, char *extra)
9472 struct ipw_priv *priv = ieee80211_priv(dev);
9474 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9475 return -EINVAL;
9477 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9478 return 0;
9480 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9481 return -EINVAL;
9483 mutex_lock(&priv->mutex);
9484 if (wrqu->retry.flags & IW_RETRY_SHORT)
9485 priv->short_retry_limit = (u8) wrqu->retry.value;
9486 else if (wrqu->retry.flags & IW_RETRY_LONG)
9487 priv->long_retry_limit = (u8) wrqu->retry.value;
9488 else {
9489 priv->short_retry_limit = (u8) wrqu->retry.value;
9490 priv->long_retry_limit = (u8) wrqu->retry.value;
9493 ipw_send_retry_limit(priv, priv->short_retry_limit,
9494 priv->long_retry_limit);
9495 mutex_unlock(&priv->mutex);
9496 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9497 priv->short_retry_limit, priv->long_retry_limit);
9498 return 0;
9501 static int ipw_wx_get_retry(struct net_device *dev,
9502 struct iw_request_info *info,
9503 union iwreq_data *wrqu, char *extra)
9505 struct ipw_priv *priv = ieee80211_priv(dev);
9507 mutex_lock(&priv->mutex);
9508 wrqu->retry.disabled = 0;
9510 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9511 mutex_unlock(&priv->mutex);
9512 return -EINVAL;
9515 if (wrqu->retry.flags & IW_RETRY_LONG) {
9516 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9517 wrqu->retry.value = priv->long_retry_limit;
9518 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9519 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9520 wrqu->retry.value = priv->short_retry_limit;
9521 } else {
9522 wrqu->retry.flags = IW_RETRY_LIMIT;
9523 wrqu->retry.value = priv->short_retry_limit;
9525 mutex_unlock(&priv->mutex);
9527 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9529 return 0;
9532 static int ipw_wx_set_scan(struct net_device *dev,
9533 struct iw_request_info *info,
9534 union iwreq_data *wrqu, char *extra)
9536 struct ipw_priv *priv = ieee80211_priv(dev);
9537 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9538 struct delayed_work *work = NULL;
9540 mutex_lock(&priv->mutex);
9542 priv->user_requested_scan = 1;
9544 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9545 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9546 int len = min((int)req->essid_len,
9547 (int)sizeof(priv->direct_scan_ssid));
9548 memcpy(priv->direct_scan_ssid, req->essid, len);
9549 priv->direct_scan_ssid_len = len;
9550 work = &priv->request_direct_scan;
9551 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9552 work = &priv->request_passive_scan;
9554 } else {
9555 /* Normal active broadcast scan */
9556 work = &priv->request_scan;
9559 mutex_unlock(&priv->mutex);
9561 IPW_DEBUG_WX("Start scan\n");
9563 queue_delayed_work(priv->workqueue, work, 0);
9565 return 0;
9568 static int ipw_wx_get_scan(struct net_device *dev,
9569 struct iw_request_info *info,
9570 union iwreq_data *wrqu, char *extra)
9572 struct ipw_priv *priv = ieee80211_priv(dev);
9573 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9576 static int ipw_wx_set_encode(struct net_device *dev,
9577 struct iw_request_info *info,
9578 union iwreq_data *wrqu, char *key)
9580 struct ipw_priv *priv = ieee80211_priv(dev);
9581 int ret;
9582 u32 cap = priv->capability;
9584 mutex_lock(&priv->mutex);
9585 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9587 /* In IBSS mode, we need to notify the firmware to update
9588 * the beacon info after we changed the capability. */
9589 if (cap != priv->capability &&
9590 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9591 priv->status & STATUS_ASSOCIATED)
9592 ipw_disassociate(priv);
9594 mutex_unlock(&priv->mutex);
9595 return ret;
9598 static int ipw_wx_get_encode(struct net_device *dev,
9599 struct iw_request_info *info,
9600 union iwreq_data *wrqu, char *key)
9602 struct ipw_priv *priv = ieee80211_priv(dev);
9603 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9606 static int ipw_wx_set_power(struct net_device *dev,
9607 struct iw_request_info *info,
9608 union iwreq_data *wrqu, char *extra)
9610 struct ipw_priv *priv = ieee80211_priv(dev);
9611 int err;
9612 mutex_lock(&priv->mutex);
9613 if (wrqu->power.disabled) {
9614 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9615 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9616 if (err) {
9617 IPW_DEBUG_WX("failed setting power mode.\n");
9618 mutex_unlock(&priv->mutex);
9619 return err;
9621 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9622 mutex_unlock(&priv->mutex);
9623 return 0;
9626 switch (wrqu->power.flags & IW_POWER_MODE) {
9627 case IW_POWER_ON: /* If not specified */
9628 case IW_POWER_MODE: /* If set all mask */
9629 case IW_POWER_ALL_R: /* If explicitly state all */
9630 break;
9631 default: /* Otherwise we don't support it */
9632 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9633 wrqu->power.flags);
9634 mutex_unlock(&priv->mutex);
9635 return -EOPNOTSUPP;
9638 /* If the user hasn't specified a power management mode yet, default
9639 * to BATTERY */
9640 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9641 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9642 else
9643 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9645 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9646 if (err) {
9647 IPW_DEBUG_WX("failed setting power mode.\n");
9648 mutex_unlock(&priv->mutex);
9649 return err;
9652 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9653 mutex_unlock(&priv->mutex);
9654 return 0;
9657 static int ipw_wx_get_power(struct net_device *dev,
9658 struct iw_request_info *info,
9659 union iwreq_data *wrqu, char *extra)
9661 struct ipw_priv *priv = ieee80211_priv(dev);
9662 mutex_lock(&priv->mutex);
9663 if (!(priv->power_mode & IPW_POWER_ENABLED))
9664 wrqu->power.disabled = 1;
9665 else
9666 wrqu->power.disabled = 0;
9668 mutex_unlock(&priv->mutex);
9669 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9671 return 0;
9674 static int ipw_wx_set_powermode(struct net_device *dev,
9675 struct iw_request_info *info,
9676 union iwreq_data *wrqu, char *extra)
9678 struct ipw_priv *priv = ieee80211_priv(dev);
9679 int mode = *(int *)extra;
9680 int err;
9682 mutex_lock(&priv->mutex);
9683 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9684 mode = IPW_POWER_AC;
9686 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9687 err = ipw_send_power_mode(priv, mode);
9688 if (err) {
9689 IPW_DEBUG_WX("failed setting power mode.\n");
9690 mutex_unlock(&priv->mutex);
9691 return err;
9693 priv->power_mode = IPW_POWER_ENABLED | mode;
9695 mutex_unlock(&priv->mutex);
9696 return 0;
9699 #define MAX_WX_STRING 80
9700 static int ipw_wx_get_powermode(struct net_device *dev,
9701 struct iw_request_info *info,
9702 union iwreq_data *wrqu, char *extra)
9704 struct ipw_priv *priv = ieee80211_priv(dev);
9705 int level = IPW_POWER_LEVEL(priv->power_mode);
9706 char *p = extra;
9708 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9710 switch (level) {
9711 case IPW_POWER_AC:
9712 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9713 break;
9714 case IPW_POWER_BATTERY:
9715 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9716 break;
9717 default:
9718 p += snprintf(p, MAX_WX_STRING - (p - extra),
9719 "(Timeout %dms, Period %dms)",
9720 timeout_duration[level - 1] / 1000,
9721 period_duration[level - 1] / 1000);
9724 if (!(priv->power_mode & IPW_POWER_ENABLED))
9725 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9727 wrqu->data.length = p - extra + 1;
9729 return 0;
9732 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9733 struct iw_request_info *info,
9734 union iwreq_data *wrqu, char *extra)
9736 struct ipw_priv *priv = ieee80211_priv(dev);
9737 int mode = *(int *)extra;
9738 u8 band = 0, modulation = 0;
9740 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9741 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9742 return -EINVAL;
9744 mutex_lock(&priv->mutex);
9745 if (priv->adapter == IPW_2915ABG) {
9746 priv->ieee->abg_true = 1;
9747 if (mode & IEEE_A) {
9748 band |= IEEE80211_52GHZ_BAND;
9749 modulation |= IEEE80211_OFDM_MODULATION;
9750 } else
9751 priv->ieee->abg_true = 0;
9752 } else {
9753 if (mode & IEEE_A) {
9754 IPW_WARNING("Attempt to set 2200BG into "
9755 "802.11a mode\n");
9756 mutex_unlock(&priv->mutex);
9757 return -EINVAL;
9760 priv->ieee->abg_true = 0;
9763 if (mode & IEEE_B) {
9764 band |= IEEE80211_24GHZ_BAND;
9765 modulation |= IEEE80211_CCK_MODULATION;
9766 } else
9767 priv->ieee->abg_true = 0;
9769 if (mode & IEEE_G) {
9770 band |= IEEE80211_24GHZ_BAND;
9771 modulation |= IEEE80211_OFDM_MODULATION;
9772 } else
9773 priv->ieee->abg_true = 0;
9775 priv->ieee->mode = mode;
9776 priv->ieee->freq_band = band;
9777 priv->ieee->modulation = modulation;
9778 init_supported_rates(priv, &priv->rates);
9780 /* Network configuration changed -- force [re]association */
9781 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9782 if (!ipw_disassociate(priv)) {
9783 ipw_send_supported_rates(priv, &priv->rates);
9784 ipw_associate(priv);
9787 /* Update the band LEDs */
9788 ipw_led_band_on(priv);
9790 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9791 mode & IEEE_A ? 'a' : '.',
9792 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9793 mutex_unlock(&priv->mutex);
9794 return 0;
9797 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9798 struct iw_request_info *info,
9799 union iwreq_data *wrqu, char *extra)
9801 struct ipw_priv *priv = ieee80211_priv(dev);
9802 mutex_lock(&priv->mutex);
9803 switch (priv->ieee->mode) {
9804 case IEEE_A:
9805 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9806 break;
9807 case IEEE_B:
9808 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9809 break;
9810 case IEEE_A | IEEE_B:
9811 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9812 break;
9813 case IEEE_G:
9814 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9815 break;
9816 case IEEE_A | IEEE_G:
9817 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9818 break;
9819 case IEEE_B | IEEE_G:
9820 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9821 break;
9822 case IEEE_A | IEEE_B | IEEE_G:
9823 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9824 break;
9825 default:
9826 strncpy(extra, "unknown", MAX_WX_STRING);
9827 break;
9830 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9832 wrqu->data.length = strlen(extra) + 1;
9833 mutex_unlock(&priv->mutex);
9835 return 0;
9838 static int ipw_wx_set_preamble(struct net_device *dev,
9839 struct iw_request_info *info,
9840 union iwreq_data *wrqu, char *extra)
9842 struct ipw_priv *priv = ieee80211_priv(dev);
9843 int mode = *(int *)extra;
9844 mutex_lock(&priv->mutex);
9845 /* Switching from SHORT -> LONG requires a disassociation */
9846 if (mode == 1) {
9847 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9848 priv->config |= CFG_PREAMBLE_LONG;
9850 /* Network configuration changed -- force [re]association */
9851 IPW_DEBUG_ASSOC
9852 ("[re]association triggered due to preamble change.\n");
9853 if (!ipw_disassociate(priv))
9854 ipw_associate(priv);
9856 goto done;
9859 if (mode == 0) {
9860 priv->config &= ~CFG_PREAMBLE_LONG;
9861 goto done;
9863 mutex_unlock(&priv->mutex);
9864 return -EINVAL;
9866 done:
9867 mutex_unlock(&priv->mutex);
9868 return 0;
9871 static int ipw_wx_get_preamble(struct net_device *dev,
9872 struct iw_request_info *info,
9873 union iwreq_data *wrqu, char *extra)
9875 struct ipw_priv *priv = ieee80211_priv(dev);
9876 mutex_lock(&priv->mutex);
9877 if (priv->config & CFG_PREAMBLE_LONG)
9878 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9879 else
9880 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9881 mutex_unlock(&priv->mutex);
9882 return 0;
9885 #ifdef CONFIG_IPW2200_MONITOR
9886 static int ipw_wx_set_monitor(struct net_device *dev,
9887 struct iw_request_info *info,
9888 union iwreq_data *wrqu, char *extra)
9890 struct ipw_priv *priv = ieee80211_priv(dev);
9891 int *parms = (int *)extra;
9892 int enable = (parms[0] > 0);
9893 mutex_lock(&priv->mutex);
9894 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9895 if (enable) {
9896 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9897 #ifdef CONFIG_IPW2200_RADIOTAP
9898 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9899 #else
9900 priv->net_dev->type = ARPHRD_IEEE80211;
9901 #endif
9902 queue_work(priv->workqueue, &priv->adapter_restart);
9905 ipw_set_channel(priv, parms[1]);
9906 } else {
9907 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9908 mutex_unlock(&priv->mutex);
9909 return 0;
9911 priv->net_dev->type = ARPHRD_ETHER;
9912 queue_work(priv->workqueue, &priv->adapter_restart);
9914 mutex_unlock(&priv->mutex);
9915 return 0;
9918 #endif /* CONFIG_IPW2200_MONITOR */
9920 static int ipw_wx_reset(struct net_device *dev,
9921 struct iw_request_info *info,
9922 union iwreq_data *wrqu, char *extra)
9924 struct ipw_priv *priv = ieee80211_priv(dev);
9925 IPW_DEBUG_WX("RESET\n");
9926 queue_work(priv->workqueue, &priv->adapter_restart);
9927 return 0;
9930 static int ipw_wx_sw_reset(struct net_device *dev,
9931 struct iw_request_info *info,
9932 union iwreq_data *wrqu, char *extra)
9934 struct ipw_priv *priv = ieee80211_priv(dev);
9935 union iwreq_data wrqu_sec = {
9936 .encoding = {
9937 .flags = IW_ENCODE_DISABLED,
9940 int ret;
9942 IPW_DEBUG_WX("SW_RESET\n");
9944 mutex_lock(&priv->mutex);
9946 ret = ipw_sw_reset(priv, 2);
9947 if (!ret) {
9948 free_firmware();
9949 ipw_adapter_restart(priv);
9952 /* The SW reset bit might have been toggled on by the 'disable'
9953 * module parameter, so take appropriate action */
9954 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9956 mutex_unlock(&priv->mutex);
9957 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9958 mutex_lock(&priv->mutex);
9960 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9961 /* Configuration likely changed -- force [re]association */
9962 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9963 "reset.\n");
9964 if (!ipw_disassociate(priv))
9965 ipw_associate(priv);
9968 mutex_unlock(&priv->mutex);
9970 return 0;
9973 /* Rebase the WE IOCTLs to zero for the handler array */
9974 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9975 static iw_handler ipw_wx_handlers[] = {
9976 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9977 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9978 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9979 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9980 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9981 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9982 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9983 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9984 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9985 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9986 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9987 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9988 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9989 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9990 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9991 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9992 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9993 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9994 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9995 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9996 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9997 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9998 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9999 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
10000 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
10001 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
10002 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
10003 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
10004 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
10005 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
10006 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
10007 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
10008 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
10009 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
10010 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
10011 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
10012 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
10013 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
10014 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
10015 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
10016 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
10019 enum {
10020 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10021 IPW_PRIV_GET_POWER,
10022 IPW_PRIV_SET_MODE,
10023 IPW_PRIV_GET_MODE,
10024 IPW_PRIV_SET_PREAMBLE,
10025 IPW_PRIV_GET_PREAMBLE,
10026 IPW_PRIV_RESET,
10027 IPW_PRIV_SW_RESET,
10028 #ifdef CONFIG_IPW2200_MONITOR
10029 IPW_PRIV_SET_MONITOR,
10030 #endif
10033 static struct iw_priv_args ipw_priv_args[] = {
10035 .cmd = IPW_PRIV_SET_POWER,
10036 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10037 .name = "set_power"},
10039 .cmd = IPW_PRIV_GET_POWER,
10040 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10041 .name = "get_power"},
10043 .cmd = IPW_PRIV_SET_MODE,
10044 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10045 .name = "set_mode"},
10047 .cmd = IPW_PRIV_GET_MODE,
10048 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10049 .name = "get_mode"},
10051 .cmd = IPW_PRIV_SET_PREAMBLE,
10052 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10053 .name = "set_preamble"},
10055 .cmd = IPW_PRIV_GET_PREAMBLE,
10056 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10057 .name = "get_preamble"},
10059 IPW_PRIV_RESET,
10060 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10062 IPW_PRIV_SW_RESET,
10063 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10064 #ifdef CONFIG_IPW2200_MONITOR
10066 IPW_PRIV_SET_MONITOR,
10067 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10068 #endif /* CONFIG_IPW2200_MONITOR */
10071 static iw_handler ipw_priv_handler[] = {
10072 ipw_wx_set_powermode,
10073 ipw_wx_get_powermode,
10074 ipw_wx_set_wireless_mode,
10075 ipw_wx_get_wireless_mode,
10076 ipw_wx_set_preamble,
10077 ipw_wx_get_preamble,
10078 ipw_wx_reset,
10079 ipw_wx_sw_reset,
10080 #ifdef CONFIG_IPW2200_MONITOR
10081 ipw_wx_set_monitor,
10082 #endif
10085 static struct iw_handler_def ipw_wx_handler_def = {
10086 .standard = ipw_wx_handlers,
10087 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10088 .num_private = ARRAY_SIZE(ipw_priv_handler),
10089 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10090 .private = ipw_priv_handler,
10091 .private_args = ipw_priv_args,
10092 .get_wireless_stats = ipw_get_wireless_stats,
10096 * Get wireless statistics.
10097 * Called by /proc/net/wireless
10098 * Also called by SIOCGIWSTATS
10100 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10102 struct ipw_priv *priv = ieee80211_priv(dev);
10103 struct iw_statistics *wstats;
10105 wstats = &priv->wstats;
10107 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10108 * netdev->get_wireless_stats seems to be called before fw is
10109 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10110 and associated; if not associated, the values are all meaningless
10111 * anyway, so set them all to NULL and INVALID */
10112 if (!(priv->status & STATUS_ASSOCIATED)) {
10113 wstats->miss.beacon = 0;
10114 wstats->discard.retries = 0;
10115 wstats->qual.qual = 0;
10116 wstats->qual.level = 0;
10117 wstats->qual.noise = 0;
10118 wstats->qual.updated = 7;
10119 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10120 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10121 return wstats;
10124 wstats->qual.qual = priv->quality;
10125 wstats->qual.level = priv->exp_avg_rssi;
10126 wstats->qual.noise = priv->exp_avg_noise;
10127 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10128 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10130 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10131 wstats->discard.retries = priv->last_tx_failures;
10132 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10134 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10135 goto fail_get_ordinal;
10136 wstats->discard.retries += tx_retry; */
10138 return wstats;
10141 /* net device stuff */
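/* Fill in the default device-level configuration that is later pushed to
 * the firmware via ipw_send_system_config().  Antenna diversity comes from
 * the 'antenna' module parameter; out-of-range values fall back to
 * CFG_SYS_ANTENNA_BOTH. */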
10143 static void init_sys_config(struct ipw_sys_config *sys_config)
10145 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10146 sys_config->bt_coexistence = 0;
10147 sys_config->answer_broadcast_ssid_probe = 0;
10148 sys_config->accept_all_data_frames = 0;
10149 sys_config->accept_non_directed_frames = 1;
10150 sys_config->exclude_unicast_unencrypted = 0;
10151 sys_config->disable_unicast_decryption = 1;
10152 sys_config->exclude_multicast_unencrypted = 0;
10153 sys_config->disable_multicast_decryption = 1;
10154 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10155 antenna = CFG_SYS_ANTENNA_BOTH;
10156 sys_config->antenna_diversity = antenna;
10157 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10158 sys_config->dot11g_auto_detection = 0;
10159 sys_config->enable_cts_to_self = 0;
10160 sys_config->bt_coexist_collision_thr = 0;
10161 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10162 sys_config->silence_threshold = 0x1e;
10165 static int ipw_net_open(struct net_device *dev)
10167 IPW_DEBUG_INFO("dev->open\n");
10168 netif_start_queue(dev);
10169 return 0;
10172 static int ipw_net_stop(struct net_device *dev)
10174 IPW_DEBUG_INFO("dev->close\n");
10175 netif_stop_queue(dev);
10176 return 0;
10180 todo:
10182 modify to send one tfd per fragment instead of using chunking. otherwise
10183 we need to heavily modify the ieee80211_skb_to_txb.
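/* Build a transmit frame descriptor (TFD) for the fragments in 'txb' and
 * hand it to the firmware.  Each fragment payload is DMA-mapped with
 * pci_map_single(); if there are more fragments than available TFD chunk
 * slots, the tail fragments are copied into one freshly allocated skb so
 * they fit in the final chunk. */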
10186 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10187 int pri)
10189 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10190 txb->fragments[0]->data;
10191 int i = 0;
10192 struct tfd_frame *tfd;
10193 #ifdef CONFIG_IPW2200_QOS
10194 int tx_id = ipw_get_tx_queue_number(priv, pri);
10195 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10196 #else
10197 struct clx2_tx_queue *txq = &priv->txq[0];
10198 #endif
10199 struct clx2_queue *q = &txq->q;
10200 u8 id, hdr_len, unicast;
10201 u16 remaining_bytes;
10202 int fc;
10204 if (!(priv->status & STATUS_ASSOCIATED))
10205 goto drop;
10207 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10208 switch (priv->ieee->iw_mode) {
10209 case IW_MODE_ADHOC:
10210 unicast = !is_multicast_ether_addr(hdr->addr1);
10211 id = ipw_find_station(priv, hdr->addr1);
10212 if (id == IPW_INVALID_STATION) {
10213 id = ipw_add_station(priv, hdr->addr1);
10214 if (id == IPW_INVALID_STATION) {
10215 IPW_WARNING("Attempt to send data to "
10216 "invalid cell: %pM\n",
10217 hdr->addr1);
10218 goto drop;
10221 break;
10223 case IW_MODE_INFRA:
10224 default:
10225 unicast = !is_multicast_ether_addr(hdr->addr3);
10226 id = 0;
10227 break;
10230 tfd = &txq->bd[q->first_empty];
10231 txq->txb[q->first_empty] = txb;
10232 memset(tfd, 0, sizeof(*tfd));
10233 tfd->u.data.station_number = id;
10235 tfd->control_flags.message_type = TX_FRAME_TYPE;
10236 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10238 tfd->u.data.cmd_id = DINO_CMD_TX;
10239 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10240 remaining_bytes = txb->payload_size;
10242 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10243 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10244 else
10245 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10247 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10248 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10250 fc = le16_to_cpu(hdr->frame_ctl);
10251 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10253 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10255 if (likely(unicast))
10256 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10258 if (txb->encrypted && !priv->ieee->host_encrypt) {
10259 switch (priv->ieee->sec.level) {
10260 case SEC_LEVEL_3:
10261 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10262 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10263 /* XXX: ACK flag must be set for CCMP even if it
10264 * is a multicast/broadcast packet, because CCMP
10265 * group communication encrypted by GTK is
10266 * actually done by the AP. */
10267 if (!unicast)
10268 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10270 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10271 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10272 tfd->u.data.key_index = 0;
10273 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10274 break;
10275 case SEC_LEVEL_2:
10276 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10277 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10278 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10279 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10280 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10281 break;
10282 case SEC_LEVEL_1:
10283 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10284 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10285 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10286 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10288 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10289 else
10290 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10291 break;
10292 case SEC_LEVEL_0:
10293 break;
10294 default:
10295 printk(KERN_ERR "Unknown security level %d\n",
10296 priv->ieee->sec.level);
10297 break;
10299 } else
10300 /* No hardware encryption */
10301 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10303 #ifdef CONFIG_IPW2200_QOS
10304 if (fc & IEEE80211_STYPE_QOS_DATA)
10305 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10306 #endif /* CONFIG_IPW2200_QOS */
10308 /* payload */
10309 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10310 txb->nr_frags));
10311 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10312 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10313 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10314 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10315 i, le32_to_cpu(tfd->u.data.num_chunks),
10316 txb->fragments[i]->len - hdr_len);
10317 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10318 i, tfd->u.data.num_chunks,
10319 txb->fragments[i]->len - hdr_len);
10320 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10321 txb->fragments[i]->len - hdr_len);
10323 tfd->u.data.chunk_ptr[i] =
10324 cpu_to_le32(pci_map_single
10325 (priv->pci_dev,
10326 txb->fragments[i]->data + hdr_len,
10327 txb->fragments[i]->len - hdr_len,
10328 PCI_DMA_TODEVICE));
10329 tfd->u.data.chunk_len[i] =
10330 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10333 if (i != txb->nr_frags) {
10334 struct sk_buff *skb;
10335 u16 remaining_bytes = 0;
10336 int j;
10338 for (j = i; j < txb->nr_frags; j++)
10339 remaining_bytes += txb->fragments[j]->len - hdr_len;
10341 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10342 remaining_bytes);
10343 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10344 if (skb != NULL) {
10345 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10346 for (j = i; j < txb->nr_frags; j++) {
10347 int size = txb->fragments[j]->len - hdr_len;
10349 printk(KERN_INFO "Adding frag %d %d...\n",
10350 j, size);
10351 memcpy(skb_put(skb, size),
10352 txb->fragments[j]->data + hdr_len, size);
10354 dev_kfree_skb_any(txb->fragments[i]);
10355 txb->fragments[i] = skb;
10356 tfd->u.data.chunk_ptr[i] =
10357 cpu_to_le32(pci_map_single
10358 (priv->pci_dev, skb->data,
10359 remaining_bytes,
10360 PCI_DMA_TODEVICE));
10362 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10366 /* kick DMA */
10367 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10368 ipw_write32(priv, q->reg_w, q->first_empty);
10370 if (ipw_tx_queue_space(q) < q->high_mark)
10371 netif_stop_queue(priv->net_dev);
10373 return NETDEV_TX_OK;
10375 drop:
10376 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10377 ieee80211_txb_free(txb);
10378 return NETDEV_TX_OK;
10381 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10383 struct ipw_priv *priv = ieee80211_priv(dev);
10384 #ifdef CONFIG_IPW2200_QOS
10385 int tx_id = ipw_get_tx_queue_number(priv, pri);
10386 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10387 #else
10388 struct clx2_tx_queue *txq = &priv->txq[0];
10389 #endif /* CONFIG_IPW2200_QOS */
10391 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10392 return 1;
10394 return 0;
10397 #ifdef CONFIG_IPW2200_PROMISCUOUS
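/* Mirror an outgoing frame to the rtap (promiscuous) interface.  The
 * prom_priv->filter bits decide whether management, control and data frames
 * are passed at all or truncated to their 802.11 header, and a minimal
 * radiotap header carrying the channel is prepended to each copied
 * fragment. */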
10398 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10399 struct ieee80211_txb *txb)
10401 struct ieee80211_rx_stats dummystats;
10402 struct ieee80211_hdr *hdr;
10403 u8 n;
10404 u16 filter = priv->prom_priv->filter;
10405 int hdr_only = 0;
10407 if (filter & IPW_PROM_NO_TX)
10408 return;
10410 memset(&dummystats, 0, sizeof(dummystats));
10412 /* Filtering of fragment chains is done against the first fragment */
10413 hdr = (void *)txb->fragments[0]->data;
10414 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
10415 if (filter & IPW_PROM_NO_MGMT)
10416 return;
10417 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10418 hdr_only = 1;
10419 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
10420 if (filter & IPW_PROM_NO_CTL)
10421 return;
10422 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10423 hdr_only = 1;
10424 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
10425 if (filter & IPW_PROM_NO_DATA)
10426 return;
10427 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10428 hdr_only = 1;
10431 for(n=0; n<txb->nr_frags; ++n) {
10432 struct sk_buff *src = txb->fragments[n];
10433 struct sk_buff *dst;
10434 struct ieee80211_radiotap_header *rt_hdr;
10435 int len;
10437 if (hdr_only) {
10438 hdr = (void *)src->data;
10439 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
10440 } else
10441 len = src->len;
10443 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10444 if (!dst)
10445 continue;
10447 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10449 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10450 rt_hdr->it_pad = 0;
10451 rt_hdr->it_present = 0; /* after all, it's just an idea */
10452 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10454 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10455 ieee80211chan2mhz(priv->channel));
10456 if (priv->channel > 14) /* 802.11a */
10457 *(__le16*)skb_put(dst, sizeof(u16)) =
10458 cpu_to_le16(IEEE80211_CHAN_OFDM |
10459 IEEE80211_CHAN_5GHZ);
10460 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10461 *(__le16*)skb_put(dst, sizeof(u16)) =
10462 cpu_to_le16(IEEE80211_CHAN_CCK |
10463 IEEE80211_CHAN_2GHZ);
10464 else /* 802.11g */
10465 *(__le16*)skb_put(dst, sizeof(u16)) =
10466 cpu_to_le16(IEEE80211_CHAN_OFDM |
10467 IEEE80211_CHAN_2GHZ);
10469 rt_hdr->it_len = cpu_to_le16(dst->len);
10471 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10473 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10474 dev_kfree_skb_any(dst);
10477 #endif
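/* ieee80211 layer transmit hook.  Takes the driver spinlock, optionally
 * mirrors the frame to the rtap interface, then queues it to the hardware
 * via ipw_tx_skb() and blinks the activity LED on success. */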
10479 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10480 struct net_device *dev, int pri)
10482 struct ipw_priv *priv = ieee80211_priv(dev);
10483 unsigned long flags;
10484 int ret;
10486 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10487 spin_lock_irqsave(&priv->lock, flags);
10489 #ifdef CONFIG_IPW2200_PROMISCUOUS
10490 if (rtap_iface && netif_running(priv->prom_net_dev))
10491 ipw_handle_promiscuous_tx(priv, txb);
10492 #endif
10494 ret = ipw_tx_skb(priv, txb, pri);
10495 if (ret == NETDEV_TX_OK)
10496 __ipw_led_activity_on(priv);
10497 spin_unlock_irqrestore(&priv->lock, flags);
10499 return ret;
10502 static void ipw_net_set_multicast_list(struct net_device *dev)
10507 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10509 struct ipw_priv *priv = ieee80211_priv(dev);
10510 struct sockaddr *addr = p;
10512 if (!is_valid_ether_addr(addr->sa_data))
10513 return -EADDRNOTAVAIL;
10514 mutex_lock(&priv->mutex);
10515 priv->config |= CFG_CUSTOM_MAC;
10516 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10517 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10518 priv->net_dev->name, priv->mac_addr);
10519 queue_work(priv->workqueue, &priv->adapter_restart);
10520 mutex_unlock(&priv->mutex);
10521 return 0;
10524 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10525 struct ethtool_drvinfo *info)
10527 struct ipw_priv *p = ieee80211_priv(dev);
10528 char vers[64];
10529 char date[32];
10530 u32 len;
10532 strcpy(info->driver, DRV_NAME);
10533 strcpy(info->version, DRV_VERSION);
10535 len = sizeof(vers);
10536 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10537 len = sizeof(date);
10538 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10540 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10541 vers, date);
10542 strcpy(info->bus_info, pci_name(p->pci_dev));
10543 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10546 static u32 ipw_ethtool_get_link(struct net_device *dev)
10548 struct ipw_priv *priv = ieee80211_priv(dev);
10549 return (priv->status & STATUS_ASSOCIATED) != 0;
10552 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10554 return IPW_EEPROM_IMAGE_SIZE;
10557 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10558 struct ethtool_eeprom *eeprom, u8 * bytes)
10560 struct ipw_priv *p = ieee80211_priv(dev);
10562 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10563 return -EINVAL;
10564 mutex_lock(&p->mutex);
10565 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10566 mutex_unlock(&p->mutex);
10567 return 0;
10570 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10571 struct ethtool_eeprom *eeprom, u8 * bytes)
10573 struct ipw_priv *p = ieee80211_priv(dev);
10574 int i;
10576 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10577 return -EINVAL;
10578 mutex_lock(&p->mutex);
10579 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10580 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10581 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10582 mutex_unlock(&p->mutex);
10583 return 0;
10586 static const struct ethtool_ops ipw_ethtool_ops = {
10587 .get_link = ipw_ethtool_get_link,
10588 .get_drvinfo = ipw_ethtool_get_drvinfo,
10589 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10590 .get_eeprom = ipw_ethtool_get_eeprom,
10591 .set_eeprom = ipw_ethtool_set_eeprom,
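/* Primary interrupt handler.  Reads and acknowledges the pending INTA bits,
 * disables further interrupts, caches the status for the tasklet and
 * schedules it.  Returns IRQ_NONE for shared interrupts that are not ours
 * or when the hardware has disappeared (INTA == 0xFFFFFFFF). */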
10594 static irqreturn_t ipw_isr(int irq, void *data)
10596 struct ipw_priv *priv = data;
10597 u32 inta, inta_mask;
10599 if (!priv)
10600 return IRQ_NONE;
10602 spin_lock(&priv->irq_lock);
10604 if (!(priv->status & STATUS_INT_ENABLED)) {
10605 /* IRQ is disabled */
10606 goto none;
10609 inta = ipw_read32(priv, IPW_INTA_RW);
10610 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10612 if (inta == 0xFFFFFFFF) {
10613 /* Hardware disappeared */
10614 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10615 goto none;
10618 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10619 /* Shared interrupt */
10620 goto none;
10623 /* tell the device to stop sending interrupts */
10624 __ipw_disable_interrupts(priv);
10626 /* ack current interrupts */
10627 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10628 ipw_write32(priv, IPW_INTA_RW, inta);
10630 /* Cache INTA value for our tasklet */
10631 priv->isr_inta = inta;
10633 tasklet_schedule(&priv->irq_tasklet);
10635 spin_unlock(&priv->irq_lock);
10637 return IRQ_HANDLED;
10638 none:
10639 spin_unlock(&priv->irq_lock);
10640 return IRQ_NONE;
10643 static void ipw_rf_kill(void *adapter)
10645 struct ipw_priv *priv = adapter;
10646 unsigned long flags;
10648 spin_lock_irqsave(&priv->lock, flags);
10650 if (rf_kill_active(priv)) {
10651 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10652 if (priv->workqueue)
10653 queue_delayed_work(priv->workqueue,
10654 &priv->rf_kill, 2 * HZ);
10655 goto exit_unlock;
10658 /* RF Kill is now disabled, so bring the device back up */
10660 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10661 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10662 "device\n");
10664 /* we cannot do an adapter restart while inside an irq lock */
10665 queue_work(priv->workqueue, &priv->adapter_restart);
10666 } else
10667 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10668 "enabled\n");
10670 exit_unlock:
10671 spin_unlock_irqrestore(&priv->lock, flags);
10674 static void ipw_bg_rf_kill(struct work_struct *work)
10676 struct ipw_priv *priv =
10677 container_of(work, struct ipw_priv, rf_kill.work);
10678 mutex_lock(&priv->mutex);
10679 ipw_rf_kill(priv);
10680 mutex_unlock(&priv->mutex);
10683 static void ipw_link_up(struct ipw_priv *priv)
10685 priv->last_seq_num = -1;
10686 priv->last_frag_num = -1;
10687 priv->last_packet_time = 0;
10689 netif_carrier_on(priv->net_dev);
10691 cancel_delayed_work(&priv->request_scan);
10692 cancel_delayed_work(&priv->request_direct_scan);
10693 cancel_delayed_work(&priv->request_passive_scan);
10694 cancel_delayed_work(&priv->scan_event);
10695 ipw_reset_stats(priv);
10696 /* Ensure the rate is updated immediately */
10697 priv->last_rate = ipw_get_current_rate(priv);
10698 ipw_gather_stats(priv);
10699 ipw_led_link_up(priv);
10700 notify_wx_assoc_event(priv);
10702 if (priv->config & CFG_BACKGROUND_SCAN)
10703 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10706 static void ipw_bg_link_up(struct work_struct *work)
10708 struct ipw_priv *priv =
10709 container_of(work, struct ipw_priv, link_up);
10710 mutex_lock(&priv->mutex);
10711 ipw_link_up(priv);
10712 mutex_unlock(&priv->mutex);
10715 static void ipw_link_down(struct ipw_priv *priv)
10717 ipw_led_link_down(priv);
10718 netif_carrier_off(priv->net_dev);
10719 notify_wx_assoc_event(priv);
10721 /* Cancel any queued work ... */
10722 cancel_delayed_work(&priv->request_scan);
10723 cancel_delayed_work(&priv->request_direct_scan);
10724 cancel_delayed_work(&priv->request_passive_scan);
10725 cancel_delayed_work(&priv->adhoc_check);
10726 cancel_delayed_work(&priv->gather_stats);
10728 ipw_reset_stats(priv);
10730 if (!(priv->status & STATUS_EXIT_PENDING)) {
10731 /* Queue up another scan... */
10732 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10733 } else
10734 cancel_delayed_work(&priv->scan_event);
10737 static void ipw_bg_link_down(struct work_struct *work)
10739 struct ipw_priv *priv =
10740 container_of(work, struct ipw_priv, link_down);
10741 mutex_lock(&priv->mutex);
10742 ipw_link_down(priv);
10743 mutex_unlock(&priv->mutex);
10746 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10748 int ret = 0;
10750 priv->workqueue = create_workqueue(DRV_NAME);
10751 init_waitqueue_head(&priv->wait_command_queue);
10752 init_waitqueue_head(&priv->wait_state);
10754 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10755 INIT_WORK(&priv->associate, ipw_bg_associate);
10756 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10757 INIT_WORK(&priv->system_config, ipw_system_config);
10758 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10759 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10760 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10761 INIT_WORK(&priv->up, ipw_bg_up);
10762 INIT_WORK(&priv->down, ipw_bg_down);
10763 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10764 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10765 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10766 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10767 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10768 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10769 INIT_WORK(&priv->roam, ipw_bg_roam);
10770 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10771 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10772 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10773 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10774 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10775 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10776 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10778 #ifdef CONFIG_IPW2200_QOS
10779 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10780 #endif /* CONFIG_IPW2200_QOS */
10782 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10783 ipw_irq_tasklet, (unsigned long)priv);
10785 return ret;
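/* ieee80211 set_security callback.  Copies key material, the active key
 * index, authentication mode and security level from 'sec' into
 * priv->ieee->sec, updates the privacy/shared-key capability bits and flags
 * STATUS_SECURITY_UPDATED so the new settings are pushed to the hardware. */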
10788 static void shim__set_security(struct net_device *dev,
10789 struct ieee80211_security *sec)
10791 struct ipw_priv *priv = ieee80211_priv(dev);
10792 int i;
10793 for (i = 0; i < 4; i++) {
10794 if (sec->flags & (1 << i)) {
10795 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10796 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10797 if (sec->key_sizes[i] == 0)
10798 priv->ieee->sec.flags &= ~(1 << i);
10799 else {
10800 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10801 sec->key_sizes[i]);
10802 priv->ieee->sec.flags |= (1 << i);
10804 priv->status |= STATUS_SECURITY_UPDATED;
10805 } else if (sec->level != SEC_LEVEL_1)
10806 priv->ieee->sec.flags &= ~(1 << i);
10809 if (sec->flags & SEC_ACTIVE_KEY) {
10810 if (sec->active_key <= 3) {
10811 priv->ieee->sec.active_key = sec->active_key;
10812 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10813 } else
10814 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10815 priv->status |= STATUS_SECURITY_UPDATED;
10816 } else
10817 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10819 if ((sec->flags & SEC_AUTH_MODE) &&
10820 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10821 priv->ieee->sec.auth_mode = sec->auth_mode;
10822 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10823 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10824 priv->capability |= CAP_SHARED_KEY;
10825 else
10826 priv->capability &= ~CAP_SHARED_KEY;
10827 priv->status |= STATUS_SECURITY_UPDATED;
10830 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10831 priv->ieee->sec.flags |= SEC_ENABLED;
10832 priv->ieee->sec.enabled = sec->enabled;
10833 priv->status |= STATUS_SECURITY_UPDATED;
10834 if (sec->enabled)
10835 priv->capability |= CAP_PRIVACY_ON;
10836 else
10837 priv->capability &= ~CAP_PRIVACY_ON;
10840 if (sec->flags & SEC_ENCRYPT)
10841 priv->ieee->sec.encrypt = sec->encrypt;
10843 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10844 priv->ieee->sec.level = sec->level;
10845 priv->ieee->sec.flags |= SEC_LEVEL;
10846 priv->status |= STATUS_SECURITY_UPDATED;
10849 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10850 ipw_set_hwcrypto_keys(priv);
10852 /* To match current functionality of ipw2100 (which works well w/
10853 * various supplicants), we don't force a disassociate if the
10854 * privacy capability changes ... */
10855 #if 0
10856 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10857 (((priv->assoc_request.capability &
10858 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10859 (!(priv->assoc_request.capability &
10860 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10861 IPW_DEBUG_ASSOC("Disassociating due to capability "
10862 "change.\n");
10863 ipw_disassociate(priv);
10865 #endif
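/* Build the supported-rates element advertised to the firmware: OFDM rates
 * only for the 5.2GHz band (A mode), CCK plus optional OFDM rates for
 * 2.4GHz or mixed operation (G mode). */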
10868 static int init_supported_rates(struct ipw_priv *priv,
10869 struct ipw_supported_rates *rates)
10871 /* TODO: Mask out rates based on priv->rates_mask */
10873 memset(rates, 0, sizeof(*rates));
10874 /* configure supported rates */
10875 switch (priv->ieee->freq_band) {
10876 case IEEE80211_52GHZ_BAND:
10877 rates->ieee_mode = IPW_A_MODE;
10878 rates->purpose = IPW_RATE_CAPABILITIES;
10879 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10880 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10881 break;
10883 default: /* Mixed or 2.4Ghz */
10884 rates->ieee_mode = IPW_G_MODE;
10885 rates->purpose = IPW_RATE_CAPABILITIES;
10886 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10887 IEEE80211_CCK_DEFAULT_RATES_MASK);
10888 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10889 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10890 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10892 break;
10895 return 0;
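/* Push the runtime configuration to the freshly loaded firmware: Tx power,
 * adapter address, system config (including Bluetooth coexistence and
 * promiscuous settings), supported rates, RTS threshold, QoS parameters and
 * the random seed, then send the host-complete command to move the firmware
 * into the RUN state. */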
10898 static int ipw_config(struct ipw_priv *priv)
10900 /* This is only called from ipw_up, which resets/reloads the firmware
10901 so we don't need to first disable the card before we configure
10902 it */
10903 if (ipw_set_tx_power(priv))
10904 goto error;
10906 /* initialize adapter address */
10907 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10908 goto error;
10910 /* set basic system config settings */
10911 init_sys_config(&priv->sys_config);
10913 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10914 * Does not support BT priority yet (don't abort or defer our Tx) */
10915 if (bt_coexist) {
10916 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10918 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10919 priv->sys_config.bt_coexistence
10920 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10921 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10922 priv->sys_config.bt_coexistence
10923 |= CFG_BT_COEXISTENCE_OOB;
10926 #ifdef CONFIG_IPW2200_PROMISCUOUS
10927 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10928 priv->sys_config.accept_all_data_frames = 1;
10929 priv->sys_config.accept_non_directed_frames = 1;
10930 priv->sys_config.accept_all_mgmt_bcpr = 1;
10931 priv->sys_config.accept_all_mgmt_frames = 1;
10933 #endif
10935 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10936 priv->sys_config.answer_broadcast_ssid_probe = 1;
10937 else
10938 priv->sys_config.answer_broadcast_ssid_probe = 0;
10940 if (ipw_send_system_config(priv))
10941 goto error;
10943 init_supported_rates(priv, &priv->rates);
10944 if (ipw_send_supported_rates(priv, &priv->rates))
10945 goto error;
10947 /* Set request-to-send threshold */
10948 if (priv->rts_threshold) {
10949 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10950 goto error;
10952 #ifdef CONFIG_IPW2200_QOS
10953 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10954 ipw_qos_activate(priv, NULL);
10955 #endif /* CONFIG_IPW2200_QOS */
10957 if (ipw_set_random_seed(priv))
10958 goto error;
10960 /* final state transition to the RUN state */
10961 if (ipw_send_host_complete(priv))
10962 goto error;
10964 priv->status |= STATUS_INIT;
10966 ipw_led_init(priv);
10967 ipw_led_radio_on(priv);
10968 priv->notif_missed_beacons = 0;
10970 /* Set hardware WEP key if it is configured. */
10971 if ((priv->capability & CAP_PRIVACY_ON) &&
10972 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10973 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10974 ipw_set_hwcrypto_keys(priv);
10976 return 0;
10978 error:
10979 return -EIO;
10983 * NOTE:
10985 * These tables have been tested in conjunction with the
10986 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10988 Altering these values, using them on other hardware, or in geographies
10989 * not intended for resale of the above mentioned Intel adapters has
10990 * not been tested.
10992 * Remember to update the table in README.ipw2200 when changing this
10993 * table.
10996 static const struct ieee80211_geo ipw_geos[] = {
10997 { /* Restricted */
10998 "---",
10999 .bg_channels = 11,
11000 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11001 {2427, 4}, {2432, 5}, {2437, 6},
11002 {2442, 7}, {2447, 8}, {2452, 9},
11003 {2457, 10}, {2462, 11}},
11006 { /* Custom US/Canada */
11007 "ZZF",
11008 .bg_channels = 11,
11009 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11010 {2427, 4}, {2432, 5}, {2437, 6},
11011 {2442, 7}, {2447, 8}, {2452, 9},
11012 {2457, 10}, {2462, 11}},
11013 .a_channels = 8,
11014 .a = {{5180, 36},
11015 {5200, 40},
11016 {5220, 44},
11017 {5240, 48},
11018 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11019 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11020 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11021 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
11024 { /* Rest of World */
11025 "ZZD",
11026 .bg_channels = 13,
11027 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11028 {2427, 4}, {2432, 5}, {2437, 6},
11029 {2442, 7}, {2447, 8}, {2452, 9},
11030 {2457, 10}, {2462, 11}, {2467, 12},
11031 {2472, 13}},
11034 { /* Custom USA & Europe & High */
11035 "ZZA",
11036 .bg_channels = 11,
11037 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11038 {2427, 4}, {2432, 5}, {2437, 6},
11039 {2442, 7}, {2447, 8}, {2452, 9},
11040 {2457, 10}, {2462, 11}},
11041 .a_channels = 13,
11042 .a = {{5180, 36},
11043 {5200, 40},
11044 {5220, 44},
11045 {5240, 48},
11046 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11047 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11048 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11049 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11050 {5745, 149},
11051 {5765, 153},
11052 {5785, 157},
11053 {5805, 161},
11054 {5825, 165}},
11057 { /* Custom NA & Europe */
11058 "ZZB",
11059 .bg_channels = 11,
11060 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11061 {2427, 4}, {2432, 5}, {2437, 6},
11062 {2442, 7}, {2447, 8}, {2452, 9},
11063 {2457, 10}, {2462, 11}},
11064 .a_channels = 13,
11065 .a = {{5180, 36},
11066 {5200, 40},
11067 {5220, 44},
11068 {5240, 48},
11069 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11070 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11071 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11072 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11073 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11074 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11075 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11076 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11077 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11080 { /* Custom Japan */
11081 "ZZC",
11082 .bg_channels = 11,
11083 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11084 {2427, 4}, {2432, 5}, {2437, 6},
11085 {2442, 7}, {2447, 8}, {2452, 9},
11086 {2457, 10}, {2462, 11}},
11087 .a_channels = 4,
11088 .a = {{5170, 34}, {5190, 38},
11089 {5210, 42}, {5230, 46}},
11092 { /* Custom */
11093 "ZZM",
11094 .bg_channels = 11,
11095 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11096 {2427, 4}, {2432, 5}, {2437, 6},
11097 {2442, 7}, {2447, 8}, {2452, 9},
11098 {2457, 10}, {2462, 11}},
11101 { /* Europe */
11102 "ZZE",
11103 .bg_channels = 13,
11104 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11105 {2427, 4}, {2432, 5}, {2437, 6},
11106 {2442, 7}, {2447, 8}, {2452, 9},
11107 {2457, 10}, {2462, 11}, {2467, 12},
11108 {2472, 13}},
11109 .a_channels = 19,
11110 .a = {{5180, 36},
11111 {5200, 40},
11112 {5220, 44},
11113 {5240, 48},
11114 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11115 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11116 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11117 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11118 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11119 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11120 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11121 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11122 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11123 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11124 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11125 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11126 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11127 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11128 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11131 { /* Custom Japan */
11132 "ZZJ",
11133 .bg_channels = 14,
11134 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11135 {2427, 4}, {2432, 5}, {2437, 6},
11136 {2442, 7}, {2447, 8}, {2452, 9},
11137 {2457, 10}, {2462, 11}, {2467, 12},
11138 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11139 .a_channels = 4,
11140 .a = {{5170, 34}, {5190, 38},
11141 {5210, 42}, {5230, 46}},
11144 { /* Rest of World */
11145 "ZZR",
11146 .bg_channels = 14,
11147 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11148 {2427, 4}, {2432, 5}, {2437, 6},
11149 {2442, 7}, {2447, 8}, {2452, 9},
11150 {2457, 10}, {2462, 11}, {2467, 12},
11151 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11152 IEEE80211_CH_PASSIVE_ONLY}},
11155 { /* High Band */
11156 "ZZH",
11157 .bg_channels = 13,
11158 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11159 {2427, 4}, {2432, 5}, {2437, 6},
11160 {2442, 7}, {2447, 8}, {2452, 9},
11161 {2457, 10}, {2462, 11},
11162 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11163 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11164 .a_channels = 4,
11165 .a = {{5745, 149}, {5765, 153},
11166 {5785, 157}, {5805, 161}},
11169 { /* Custom Europe */
11170 "ZZG",
11171 .bg_channels = 13,
11172 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11173 {2427, 4}, {2432, 5}, {2437, 6},
11174 {2442, 7}, {2447, 8}, {2452, 9},
11175 {2457, 10}, {2462, 11},
11176 {2467, 12}, {2472, 13}},
11177 .a_channels = 4,
11178 .a = {{5180, 36}, {5200, 40},
11179 {5220, 44}, {5240, 48}},
11182 { /* Europe */
11183 "ZZK",
11184 .bg_channels = 13,
11185 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11186 {2427, 4}, {2432, 5}, {2437, 6},
11187 {2442, 7}, {2447, 8}, {2452, 9},
11188 {2457, 10}, {2462, 11},
11189 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11190 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11191 .a_channels = 24,
11192 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11193 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11194 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11195 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11196 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11197 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11198 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11199 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11200 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11201 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11202 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11203 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11204 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11205 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11206 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11207 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11208 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11209 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11210 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11211 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11212 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11213 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11214 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11215 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11218 { /* Europe */
11219 "ZZL",
11220 .bg_channels = 11,
11221 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11222 {2427, 4}, {2432, 5}, {2437, 6},
11223 {2442, 7}, {2447, 8}, {2452, 9},
11224 {2457, 10}, {2462, 11}},
11225 .a_channels = 13,
11226 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11227 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11228 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11229 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11230 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11231 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11232 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11233 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11234 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11235 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11236 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11237 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11238 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11242 #define MAX_HW_RESTARTS 5
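/* Bring the device up: load microcode, firmware and the eeprom, pick the
 * geography matching the EEPROM country code, honour any RF-kill state and
 * then configure the adapter, retrying the whole sequence up to
 * MAX_HW_RESTARTS times. */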
11243 static int ipw_up(struct ipw_priv *priv)
11245 int rc, i, j;
11247 /* Age scan list entries found before suspend */
11248 if (priv->suspend_time) {
11249 ieee80211_networks_age(priv->ieee, priv->suspend_time);
11250 priv->suspend_time = 0;
11253 if (priv->status & STATUS_EXIT_PENDING)
11254 return -EIO;
11256 if (cmdlog && !priv->cmdlog) {
11257 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11258 GFP_KERNEL);
11259 if (priv->cmdlog == NULL) {
11260 IPW_ERROR("Error allocating %d command log entries.\n",
11261 cmdlog);
11262 return -ENOMEM;
11263 } else {
11264 priv->cmdlog_len = cmdlog;
11268 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11269 /* Load the microcode, firmware, and eeprom.
11270 * Also start the clocks. */
11271 rc = ipw_load(priv);
11272 if (rc) {
11273 IPW_ERROR("Unable to load firmware: %d\n", rc);
11274 return rc;
11277 ipw_init_ordinals(priv);
11278 if (!(priv->config & CFG_CUSTOM_MAC))
11279 eeprom_parse_mac(priv, priv->mac_addr);
11280 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11282 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11283 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11284 ipw_geos[j].name, 3))
11285 break;
11287 if (j == ARRAY_SIZE(ipw_geos)) {
11288 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11289 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11290 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11291 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11292 j = 0;
11294 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11295 IPW_WARNING("Could not set geography.");
11296 return 0;
11299 if (priv->status & STATUS_RF_KILL_SW) {
11300 IPW_WARNING("Radio disabled by module parameter.\n");
11301 return 0;
11302 } else if (rf_kill_active(priv)) {
11303 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11304 "Kill switch must be turned off for "
11305 "wireless networking to work.\n");
11306 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11307 2 * HZ);
11308 return 0;
11311 rc = ipw_config(priv);
11312 if (!rc) {
11313 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11315 /* If configured to try and auto-associate, kick
11316 * off a scan. */
11317 queue_delayed_work(priv->workqueue,
11318 &priv->request_scan, 0);
11320 return 0;
11323 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11324 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11325 i, MAX_HW_RESTARTS);
11327 /* We had an error bringing up the hardware, so take it
11328 * all the way back down so we can try again */
11329 ipw_down(priv);
11332 /* tried to restart and config the device for as long as our
11333 * patience could withstand */
11334 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11336 return -EIO;
11339 static void ipw_bg_up(struct work_struct *work)
11341 struct ipw_priv *priv =
11342 container_of(work, struct ipw_priv, up);
11343 mutex_lock(&priv->mutex);
11344 ipw_up(priv);
11345 mutex_unlock(&priv->mutex);
11348 static void ipw_deinit(struct ipw_priv *priv)
11350 int i;
11352 if (priv->status & STATUS_SCANNING) {
11353 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11354 ipw_abort_scan(priv);
11357 if (priv->status & STATUS_ASSOCIATED) {
11358 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11359 ipw_disassociate(priv);
11362 ipw_led_shutdown(priv);
11364 /* Wait up to 1s for status to change to not scanning and not
11365 * associated (disassociation can take a while for a full 802.11
11366 * exchange) */
11367 for (i = 1000; i && (priv->status &
11368 (STATUS_DISASSOCIATING |
11369 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11370 udelay(10);
11372 if (priv->status & (STATUS_DISASSOCIATING |
11373 STATUS_ASSOCIATED | STATUS_SCANNING))
11374 IPW_DEBUG_INFO("Still associated or scanning...\n");
11375 else
11376 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11378 /* Attempt to disable the card */
11379 ipw_send_card_disable(priv, 0);
11381 priv->status &= ~STATUS_INIT;
11384 static void ipw_down(struct ipw_priv *priv)
11386 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11388 priv->status |= STATUS_EXIT_PENDING;
11390 if (ipw_is_init(priv))
11391 ipw_deinit(priv);
11393 /* Wipe out the EXIT_PENDING status bit if we are not actually
11394 * exiting the module */
11395 if (!exit_pending)
11396 priv->status &= ~STATUS_EXIT_PENDING;
11398 /* tell the device to stop sending interrupts */
11399 ipw_disable_interrupts(priv);
11401 /* Clear all bits but the RF Kill */
11402 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11403 netif_carrier_off(priv->net_dev);
11405 ipw_stop_nic(priv);
11407 ipw_led_radio_off(priv);
11410 static void ipw_bg_down(struct work_struct *work)
11412 struct ipw_priv *priv =
11413 container_of(work, struct ipw_priv, down);
11414 mutex_lock(&priv->mutex);
11415 ipw_down(priv);
11416 mutex_unlock(&priv->mutex);
11419 /* Called by register_netdev() */
11420 static int ipw_net_init(struct net_device *dev)
11422 struct ipw_priv *priv = ieee80211_priv(dev);
11423 mutex_lock(&priv->mutex);
11425 if (ipw_up(priv)) {
11426 mutex_unlock(&priv->mutex);
11427 return -EIO;
11430 mutex_unlock(&priv->mutex);
11431 return 0;
11434 /* PCI driver stuff */
11435 static struct pci_device_id card_ids[] = {
11436 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11437 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11438 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11439 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11440 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11441 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11442 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11443 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11444 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11445 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11446 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11447 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11448 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11449 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11450 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11451 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11452 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11453 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11454 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11455 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11456 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11457 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11459 /* required last entry */
11460 {0,}
11463 MODULE_DEVICE_TABLE(pci, card_ids);
11465 static struct attribute *ipw_sysfs_entries[] = {
11466 &dev_attr_rf_kill.attr,
11467 &dev_attr_direct_dword.attr,
11468 &dev_attr_indirect_byte.attr,
11469 &dev_attr_indirect_dword.attr,
11470 &dev_attr_mem_gpio_reg.attr,
11471 &dev_attr_command_event_reg.attr,
11472 &dev_attr_nic_type.attr,
11473 &dev_attr_status.attr,
11474 &dev_attr_cfg.attr,
11475 &dev_attr_error.attr,
11476 &dev_attr_event_log.attr,
11477 &dev_attr_cmd_log.attr,
11478 &dev_attr_eeprom_delay.attr,
11479 &dev_attr_ucode_version.attr,
11480 &dev_attr_rtc.attr,
11481 &dev_attr_scan_age.attr,
11482 &dev_attr_led.attr,
11483 &dev_attr_speed_scan.attr,
11484 &dev_attr_net_stats.attr,
11485 &dev_attr_channels.attr,
11486 #ifdef CONFIG_IPW2200_PROMISCUOUS
11487 &dev_attr_rtap_iface.attr,
11488 &dev_attr_rtap_filter.attr,
11489 #endif
11490 NULL
11493 static struct attribute_group ipw_attribute_group = {
11494 .name = NULL, /* put in device directory */
11495 .attrs = ipw_sysfs_entries,
11498 #ifdef CONFIG_IPW2200_PROMISCUOUS
11499 static int ipw_prom_open(struct net_device *dev)
11501 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11502 struct ipw_priv *priv = prom_priv->priv;
11504 IPW_DEBUG_INFO("prom dev->open\n");
11505 netif_carrier_off(dev);
11507 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11508 priv->sys_config.accept_all_data_frames = 1;
11509 priv->sys_config.accept_non_directed_frames = 1;
11510 priv->sys_config.accept_all_mgmt_bcpr = 1;
11511 priv->sys_config.accept_all_mgmt_frames = 1;
11513 ipw_send_system_config(priv);
11516 return 0;
11519 static int ipw_prom_stop(struct net_device *dev)
11521 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11522 struct ipw_priv *priv = prom_priv->priv;
11524 IPW_DEBUG_INFO("prom dev->stop\n");
11526 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11527 priv->sys_config.accept_all_data_frames = 0;
11528 priv->sys_config.accept_non_directed_frames = 0;
11529 priv->sys_config.accept_all_mgmt_bcpr = 0;
11530 priv->sys_config.accept_all_mgmt_frames = 0;
11532 ipw_send_system_config(priv);
11535 return 0;
11538 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11540 IPW_DEBUG_INFO("prom dev->xmit\n");
11541 dev_kfree_skb(skb);
11542 return NETDEV_TX_OK;
11545 static const struct net_device_ops ipw_prom_netdev_ops = {
11546 .ndo_open = ipw_prom_open,
11547 .ndo_stop = ipw_prom_stop,
11548 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11549 .ndo_change_mtu = ieee80211_change_mtu,
11550 .ndo_set_mac_address = eth_mac_addr,
11551 .ndo_validate_addr = eth_validate_addr,
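/* Allocate and register the 'rtap%d' monitor net_device used by the
 * promiscuous mode support.  It shares the adapter MAC address and delivers
 * radiotap-encapsulated frames. */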
11554 static int ipw_prom_alloc(struct ipw_priv *priv)
11556 int rc = 0;
11558 if (priv->prom_net_dev)
11559 return -EPERM;
11561 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11562 if (priv->prom_net_dev == NULL)
11563 return -ENOMEM;
11565 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11566 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11567 priv->prom_priv->priv = priv;
11569 strcpy(priv->prom_net_dev->name, "rtap%d");
11570 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11572 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11573 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11575 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11576 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11578 rc = register_netdev(priv->prom_net_dev);
11579 if (rc) {
11580 free_ieee80211(priv->prom_net_dev);
11581 priv->prom_net_dev = NULL;
11582 return rc;
11585 return 0;
11588 static void ipw_prom_free(struct ipw_priv *priv)
11590 if (!priv->prom_net_dev)
11591 return;
11593 unregister_netdev(priv->prom_net_dev);
11594 free_ieee80211(priv->prom_net_dev);
11596 priv->prom_net_dev = NULL;
11599 #endif
11601 static const struct net_device_ops ipw_netdev_ops = {
11602 .ndo_init = ipw_net_init,
11603 .ndo_open = ipw_net_open,
11604 .ndo_stop = ipw_net_stop,
11605 .ndo_set_multicast_list = ipw_net_set_multicast_list,
11606 .ndo_set_mac_address = ipw_net_set_mac_address,
11607 .ndo_start_xmit = ieee80211_xmit,
11608 .ndo_change_mtu = ieee80211_change_mtu,
11609 .ndo_validate_addr = eth_validate_addr,
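/* PCI probe: allocate the ieee80211 device, enable the PCI device and
 * 32-bit DMA, map BAR 0, set up the deferred work items and the IRQ
 * handler, wire up the wireless-extension and ethtool hooks, and finally
 * register the net_device (plus the optional rtap interface). */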
11612 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11613 const struct pci_device_id *ent)
11615 int err = 0;
11616 struct net_device *net_dev;
11617 void __iomem *base;
11618 u32 length, val;
11619 struct ipw_priv *priv;
11620 int i;
11622 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11623 if (net_dev == NULL) {
11624 err = -ENOMEM;
11625 goto out;
11628 priv = ieee80211_priv(net_dev);
11629 priv->ieee = netdev_priv(net_dev);
11631 priv->net_dev = net_dev;
11632 priv->pci_dev = pdev;
11633 ipw_debug_level = debug;
11634 spin_lock_init(&priv->irq_lock);
11635 spin_lock_init(&priv->lock);
11636 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11637 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11639 mutex_init(&priv->mutex);
11640 if (pci_enable_device(pdev)) {
11641 err = -ENODEV;
11642 goto out_free_ieee80211;
11645 pci_set_master(pdev);
11647 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11648 if (!err)
11649 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11650 if (err) {
11651 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11652 goto out_pci_disable_device;
11655 pci_set_drvdata(pdev, priv);
11657 err = pci_request_regions(pdev, DRV_NAME);
11658 if (err)
11659 goto out_pci_disable_device;
11661 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11662 * PCI Tx retries from interfering with C3 CPU state */
11663 pci_read_config_dword(pdev, 0x40, &val);
11664 if ((val & 0x0000ff00) != 0)
11665 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11667 length = pci_resource_len(pdev, 0);
11668 priv->hw_len = length;
11670 base = pci_ioremap_bar(pdev, 0);
11671 if (!base) {
11672 err = -ENODEV;
11673 goto out_pci_release_regions;
11676 priv->hw_base = base;
11677 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11678 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11680 err = ipw_setup_deferred_work(priv);
11681 if (err) {
11682 IPW_ERROR("Unable to setup deferred work\n");
11683 goto out_iounmap;
11686 ipw_sw_reset(priv, 1);
11688 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11689 if (err) {
11690 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11691 goto out_destroy_workqueue;
11694 SET_NETDEV_DEV(net_dev, &pdev->dev);
11696 mutex_lock(&priv->mutex);
11698 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11699 priv->ieee->set_security = shim__set_security;
11700 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11702 #ifdef CONFIG_IPW2200_QOS
11703 priv->ieee->is_qos_active = ipw_is_qos_active;
11704 priv->ieee->handle_probe_response = ipw_handle_beacon;
11705 priv->ieee->handle_beacon = ipw_handle_probe_response;
11706 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11707 #endif /* CONFIG_IPW2200_QOS */
11709 priv->ieee->perfect_rssi = -20;
11710 priv->ieee->worst_rssi = -85;
11712 net_dev->netdev_ops = &ipw_netdev_ops;
11713 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11714 net_dev->wireless_data = &priv->wireless_data;
11715 net_dev->wireless_handlers = &ipw_wx_handler_def;
11716 net_dev->ethtool_ops = &ipw_ethtool_ops;
11717 net_dev->irq = pdev->irq;
11718 net_dev->base_addr = (unsigned long)priv->hw_base;
11719 net_dev->mem_start = pci_resource_start(pdev, 0);
11720 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11722 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11723 if (err) {
11724 IPW_ERROR("failed to create sysfs device attributes\n");
11725 mutex_unlock(&priv->mutex);
11726 goto out_release_irq;
11729 mutex_unlock(&priv->mutex);
11730 err = register_netdev(net_dev);
11731 if (err) {
11732 IPW_ERROR("failed to register network device\n");
11733 goto out_remove_sysfs;
11736 #ifdef CONFIG_IPW2200_PROMISCUOUS
11737 if (rtap_iface) {
11738 err = ipw_prom_alloc(priv);
11739 if (err) {
11740 IPW_ERROR("Failed to register promiscuous network "
11741 "device (error %d).\n", err);
11742 unregister_netdev(priv->net_dev);
11743 goto out_remove_sysfs;
11746 #endif
11748 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11749 "channels, %d 802.11a channels)\n",
11750 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11751 priv->ieee->geo.a_channels);
11753 return 0;
out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
	free_irq(pdev->irq, priv);
out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
out_iounmap:
	iounmap(priv->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
out_free_ieee80211:
	free_ieee80211(priv->net_dev);
out:
	return err;
}
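/* Driver teardown. ipw_pci_remove() undoes ipw_pci_probe() in roughly
 * reverse order: stop the hardware, drop the sysfs group and the net
 * device, free the rx/tx queues and the command log, flush and destroy
 * the workqueue, empty the IBSS MAC hash, and finally hand the IRQ,
 * mapping and PCI resources back. */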
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueues, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
	free_firmware();
}
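/* Legacy PCI power management hooks. Suspend powers the adapter down and
 * detaches the net device; resume restores PCI state, re-applies the
 * RETRY_TIMEOUT quirk (the config write at 0x41 is not covered by
 * pci_restore_state) and queues the 'up' work to bring the card back. */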
#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = get_seconds();

	return 0;
}
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif
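/* .shutdown hook: make sure the adapter is powered off and the PCI
 * device disabled on reboot/halt. */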
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};
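/* Module entry points: register the PCI driver and expose the
 * debug_level attribute in the driver's sysfs directory; unwind the
 * PCI registration if the attribute cannot be created. */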
static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif
#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif	/* CONFIG_IPW2200_QOS */
#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (0=both [default], 1=Main, 2=slow_diversity - choose the one with lower background noise, 3=Aux)");
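/*
 * Illustrative load-time use of the parameters above (values are only an
 * example, not a recommended configuration):
 *
 *   modprobe ipw2200 disable=0 led=1 channel=6 hwcrypto=1
 *
 * All parameters are declared with 0444 permissions, so they show up
 * read-only under /sys/module/ipw2200/parameters/ and cannot be changed
 * at runtime.
 */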
module_exit(ipw_exit);
module_init(ipw_init);