/*
 * madwifi net80211/ieee80211_linux.h
 * (source: linux-2.6/zen-sources.git, drivers/net/wireless/madwifi/net80211/,
 * blob ab35a2e235586ce0164f3142dbfdbc5a13eeacd3)
 */
1 /*-
2 * Copyright (c) 2003-2005 Sam Leffler, Errno Consulting
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $Id: ieee80211_linux.h 3710 2008-06-06 20:32:29Z mentor $
 */
29 #ifndef _NET80211_IEEE80211_LINUX_H_
30 #define _NET80211_IEEE80211_LINUX_H_
32 #include <linux/wireless.h>
33 #include <linux/fs.h>
/*
 * Compatibility definition of statistics flags
 * (bitmask in (struct iw_quality *)->updated)
 */
#ifndef IW_QUAL_QUAL_UPDATED
#define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */
#define IW_QUAL_LEVEL_UPDATED 0x02
#define IW_QUAL_NOISE_UPDATED 0x04
#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */
#define IW_QUAL_LEVEL_INVALID 0x20
#define IW_QUAL_NOISE_INVALID 0x40
#endif /* IW_QUAL_QUAL_UPDATED */

/* Convenience masks covering all three statistics at once. */
#ifndef IW_QUAL_ALL_UPDATED
#define IW_QUAL_ALL_UPDATED \
	(IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED)
#endif
#ifndef IW_QUAL_ALL_INVALID
#define IW_QUAL_ALL_INVALID \
	(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID | IW_QUAL_NOISE_INVALID)
#endif
58 * The RSSI values reported in the TX/RX descriptors in the driver are the SNR
59 * expressed in dBm. Thus 'rssi' is signal level above the noise floor in dBm.
61 * Noise is measured in dBm and is negative unless there is an unimaginable
62 * level of RF noise.
64 * The signal level is noise + rssi.
66 * Note that the iw_quality values are 1 byte, and can be signed, unsigned or
67 * negative depending on context.
70 static __inline void
71 set_quality(struct iw_quality *iq, u_int rssi, int noise)
73 iq->qual = rssi;
74 iq->noise = noise;
75 iq->level = ((((int)rssi + noise) <= 0) ? ((int)rssi + noise) : 0);
76 iq->updated = IW_QUAL_ALL_UPDATED;
77 #if WIRELESS_EXT >= 19
78 iq->updated |= IW_QUAL_DBM;
79 #endif
/*
 * Task deferral
 *
 * Deduce if tasklets are available. If not then
 * fall back to using the immediate work queue.
 */
#include <linux/interrupt.h>
#ifdef DECLARE_TASKLET /* native tasklets */
#define IEEE80211_TQ_STRUCT tasklet_struct
#define IEEE80211_INIT_TQUEUE(a,b,c) tasklet_init((a), (b), (unsigned long)(c))
#define IEEE80211_SCHEDULE_TQUEUE(a) tasklet_schedule((a))
/* NB: tasklet_kill may block, hence the in_interrupt() guard */
#define IEEE80211_CANCEL_TQUEUE(a) if (!in_interrupt()) tasklet_kill((a))
typedef unsigned long IEEE80211_TQUEUE_ARG;
#define mark_bh(a) do {} while (0)
#else /* immediate work queue */
#define IEEE80211_TQ_STRUCT tq_struct
#define IEEE80211_INIT_TQUEUE(a,b,c) INIT_TQUEUE(a,b,c)
/* NOTE(review): __macro_needmark is |='d before ever being initialized, so
 * whether mark_bh() runs depends on stack garbage — looks like a latent bug
 * in this legacy (pre-tasklet) path; confirm before relying on it. */
#define IEEE80211_SCHEDULE_TQUEUE(a) do { \
	int __macro_needmark; \
	__macro_needmark |= queue_task((a), &tq_immediate); \
	if (__macro_needmark) \
		mark_bh(IMMEDIATE_BH); \
} while (0)
typedef void *IEEE80211_TQUEUE_ARG;
#define tasklet_disable(t) do { (void) t; local_bh_disable(); } while (0)
#define tasklet_enable(t) do { (void) t; local_bh_enable(); } while (0)
/* XXX: not supporting cancel in old kernels! */
#define IEEE80211_CANCEL_TQUEUE(a) ((a), 0)
#endif /* !DECLARE_TASKLET */

#define IEEE80211_RESCHEDULE schedule
/* Locking */
/* NB: beware, spin_is_locked() is not usefully defined for !(DEBUG || SMP)
 * because spinlocks do not exist in this configuration. Instead IRQs
 * or pre-emption are simply disabled, as this is all that is needed.
 */

/*
 * Beacon handler locking definitions.
 * Beacon locking
 * UAPSD locking
 */
typedef spinlock_t ieee80211com_lock_t;
#define IEEE80211_LOCK_INIT(_ic, _name) \
	spin_lock_init(&(_ic)->ic_comlock)
#define IEEE80211_LOCK_DESTROY(_ic)
/* NB: LOCK_IRQ deliberately opens a "do {" that the matching UNLOCK_IRQ
 * closes, keeping the saved IRQ flags in scope across the critical section.
 * The two macros MUST be paired lexically in the same block. */
#define IEEE80211_LOCK_IRQ(_ic) do { \
	unsigned long __ilockflags; \
	IEEE80211_LOCK_CHECK(_ic); \
	spin_lock_irqsave(&(_ic)->ic_comlock, __ilockflags);
#define IEEE80211_UNLOCK_IRQ(_ic) \
	IEEE80211_LOCK_ASSERT(_ic); \
	spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags); \
} while (0)
/* Unlock without closing the do{} opened by LOCK_IRQ (early-exit paths). */
#define IEEE80211_UNLOCK_IRQ_EARLY(_ic) \
	IEEE80211_LOCK_ASSERT(_ic); \
	spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_LOCK_ASSERT(_ic) \
	KASSERT(spin_is_locked(&(_ic)->ic_comlock), ("ieee80211com not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define IEEE80211_LOCK_CHECK(_ic) do { \
	if (spin_is_locked(&(_ic)->ic_comlock)) \
		printk("%s:%d - about to block on ieee80211com lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define IEEE80211_LOCK_CHECK(_ic)
#endif
#else
#define IEEE80211_LOCK_ASSERT(_ic)
#define IEEE80211_LOCK_CHECK(_ic)
#endif
/* VAP list lock (BH-safe variant; no IRQ flags needed). */
#define IEEE80211_VAPS_LOCK_INIT(_ic, _name) \
	spin_lock_init(&(_ic)->ic_vapslock)
#define IEEE80211_VAPS_LOCK_DESTROY(_ic)
/* NB: opens a "do {" that IEEE80211_VAPS_UNLOCK_BH closes — pair lexically. */
#define IEEE80211_VAPS_LOCK_BH(_ic) do { \
	IEEE80211_VAPS_LOCK_CHECK(_ic); \
	spin_lock_bh(&(_ic)->ic_vapslock);
#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
	IEEE80211_VAPS_LOCK_ASSERT(_ic); \
	spin_unlock_bh(&(_ic)->ic_vapslock); \
} while (0)

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
	KASSERT(spin_is_locked(&(_ic)->ic_vapslock), \
		("ieee80211com_vaps not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define IEEE80211_VAPS_LOCK_CHECK(_ic) do { \
	if (spin_is_locked(&(_ic)->ic_vapslock)) \
		printk("%s:%d - about to block on ieee80211com_vaps lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define IEEE80211_VAPS_LOCK_CHECK(_ic)
#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#else
#define IEEE80211_VAPS_LOCK_ASSERT(_ic)
#define IEEE80211_VAPS_LOCK_CHECK(_ic)
#endif
/*
 * Node locking definitions.
 */
#if 0 /* NB: disabled — retained for reference only */

typedef spinlock_t ieee80211_node_lock_t;
#define IEEE80211_NODE_LOCK_INIT(_ni, _name) spin_lock_init(&(_ni)->ni_nodelock)
#define IEEE80211_NODE_LOCK_DESTROY(_ni)
/* NB: opens a "do {" that IEEE80211_NODE_UNLOCK_IRQ closes. */
#define IEEE80211_NODE_LOCK_IRQ(_ni) do { \
	unsigned long __node_lockflags; \
	IEEE80211_NODE_LOCK_CHECK(_ni); \
	spin_lock_irqsave(&(_ni)->ni_nodelock, __node_lockflags);
#define IEEE80211_NODE_UNLOCK_IRQ(_ni) \
	IEEE80211_NODE_LOCK_ASSERT(_ni); \
	spin_unlock_irqrestore(&(_ni)->ni_nodelock, __node_lockflags); \
} while (0)
/* NOTE(review): the two _INSIDE macros take a parameter named _tq but their
 * bodies reference _ni — they would not expand correctly if this #if 0
 * block were ever re-enabled; fix the parameter names first. */
#define IEEE80211_NODE_LOCK_IRQ_INSIDE(_tq) do { \
	IEEE80211_NODE_LOCK_CHECK(_ni); \
	spin_lock(&(_ni)->ni_nodelock);
} while (0)
#define IEEE80211_NODE_UNLOCK_IRQ_INSIDE(_tq) do { \
	IEEE80211_NODE_LOCK_ASSERT(_ni); \
	spin_unlock(&(_ni)->ni_nodelock); \
}while (0)
#define IEEE80211_NODE_UNLOCK_IRQ_EARLY(_ni) \
	IEEE80211_NODE_LOCK_ASSERT(_ni); \
	spin_unlock_irqrestore(&(_ni)->ni_nodelock, __node_lockflags);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_NODE_LOCK_ASSERT(_ni) \
	KASSERT(spin_is_locked(&(_ni)->ni_nodelock), \
		("802.11 node not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define IEEE80211_NODE_LOCK_CHECK(_ni) do { \
	if (spin_is_locked(&(_ni)->ni_nodelock)) \
		printk("%s:%d - about to block on node lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define IEEE80211_NODE_LOCK_CHECK(_ni)
#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#else
#define IEEE80211_NODE_LOCK_ASSERT(_ni)
#define IEEE80211_NODE_LOCK_CHECK(_ni)
#endif

#endif /* node lock */
/*
 * Node table locking definitions.
 */
typedef spinlock_t ieee80211_node_table_lock_t;
#define IEEE80211_NODE_TABLE_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_nodelock)
#define IEEE80211_NODE_TABLE_LOCK_DESTROY(_nt)
#if 0 /* We should always be contesting in the same contexts */
#define IEEE80211_NODE_TABLE_LOCK(_nt) spin_lock(&(_nt)->nt_nodelock)
#define IEEE80211_NODE_TABLE_UNLOCK(_nt) spin_unlock(&(_nt)->nt_nodelock)
#define IEEE80211_NODE_TABLE_LOCK_BH(_nt) spin_lock_bh(&(_nt)->nt_nodelock)
#define IEEE80211_NODE_TABLE_UNLOCK_BH(_nt) spin_unlock_bh(&(_nt)->nt_nodelock)
#endif
/* NB: opens a "do {" that IEEE80211_NODE_TABLE_UNLOCK_IRQ closes. */
#define IEEE80211_NODE_TABLE_LOCK_IRQ(_nt) do { \
	unsigned long __node_lockflags; \
	spin_lock_irqsave(&(_nt)->nt_nodelock, __node_lockflags);
#define IEEE80211_NODE_TABLE_UNLOCK_IRQ(_nt) \
	spin_unlock_irqrestore(&(_nt)->nt_nodelock, __node_lockflags); \
} while (0)
/* Unlock without closing the do{} (early-exit paths). */
#define IEEE80211_NODE_TABLE_UNLOCK_IRQ_EARLY(_nt) \
	spin_unlock_irqrestore(&(_nt)->nt_nodelock, __node_lockflags);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_NODE_TABLE_LOCK_ASSERT(_nt) \
	KASSERT(spin_is_locked(&(_nt)->nt_nodelock), \
		("802.11 node table not locked!"))
#else
#define IEEE80211_NODE_TABLE_LOCK_ASSERT(_nt)
#endif
/*
 * Node table scangen locking definitions.
 */
typedef spinlock_t ieee80211_scan_lock_t;
#define IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
#define IEEE80211_SCAN_LOCK_DESTROY(_nt)
/* NB: opens a "do {" that IEEE80211_SCAN_UNLOCK_IRQ closes. */
#define IEEE80211_SCAN_LOCK_IRQ(_nt) do { \
	unsigned long __scan_lockflags; \
	spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
#define IEEE80211_SCAN_UNLOCK_IRQ(_nt) \
	spin_unlock_irqrestore(&(_nt)->nt_scanlock, __scan_lockflags); \
} while (0)
/* Unlock without closing the do{} (early-exit paths). */
#define IEEE80211_SCAN_UNLOCK_IRQ_EARLY(_nt) \
	spin_unlock_irqrestore(&(_nt)->nt_scanlock, __scan_lockflags);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_SCAN_LOCK_ASSERT(_nt) \
	KASSERT(spin_is_locked(&(_nt)->nt_scanlock), ("scangen not locked!"))
#else
#define IEEE80211_SCAN_LOCK_ASSERT(_nt)
#endif
/*
 * 802.1x MAC ACL database locking definitions.
 */
typedef spinlock_t acl_lock_t;
#define ACL_LOCK_INIT(_as, _name) spin_lock_init(&(_as)->as_lock)
#define ACL_LOCK_DESTROY(_as)
/* NB: opens a "do {" that ACL_UNLOCK closes — pair lexically. */
#define ACL_LOCK(_as) do { \
	ACL_LOCK_CHECK(_as); \
	spin_lock(&(_as)->as_lock);
#define ACL_UNLOCK(_as) \
	ACL_LOCK_ASSERT(_as); \
	spin_unlock(&(_as)->as_lock); \
} while (0)
/* Unlock without closing the do{} (early-exit paths). */
#define ACL_UNLOCK_EARLY(_as) \
	ACL_LOCK_ASSERT(_as); \
	spin_unlock(&(_as)->as_lock);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define ACL_LOCK_ASSERT(_as) \
	KASSERT(spin_is_locked(&(_as)->as_lock), ("ACL not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define ACL_LOCK_CHECK(_as) do { \
	if (spin_is_locked(&(_as)->as_lock)) \
		printk("%s:%d - about to block on ACL lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define ACL_LOCK_CHECK(_as)
#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#else
#define ACL_LOCK_ASSERT(_as)
#define ACL_LOCK_CHECK(_as)
#endif
/*
 * Per-node power-save queue definitions. Beware of control
 * flow with IEEE80211_NODE_SAVEQ_LOCK/IEEE80211_NODE_SAVEQ_UNLOCK.
 */
#define IEEE80211_NODE_SAVEQ_INIT(_ni, _name) do { \
	skb_queue_head_init(&(_ni)->ni_savedq); \
} while (0)
#define IEEE80211_NODE_SAVEQ_DESTROY(_ni)
#define IEEE80211_NODE_SAVEQ_QLEN(_ni) skb_queue_len(&(_ni)->ni_savedq)
/* NB: opens a "do {" that IEEE80211_NODE_SAVEQ_UNLOCK_IRQ closes. */
#define IEEE80211_NODE_SAVEQ_LOCK_IRQ(_ni) do { \
	unsigned long __qlockflags; \
	IEEE80211_NODE_SAVEQ_LOCK_CHECK(_ni); \
	spin_lock_irqsave(&(_ni)->ni_savedq.lock, __qlockflags);
#define IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(_ni) \
	IEEE80211_NODE_SAVEQ_LOCK_ASSERT(_ni); \
	spin_unlock_irqrestore(&(_ni)->ni_savedq.lock, __qlockflags); \
} while (0)
/* _INSIDE variants use plain spin_lock/spin_unlock — presumably for callers
 * that already hold IRQs off; TODO confirm against call sites. */
#define IEEE80211_NODE_SAVEQ_LOCK_IRQ_INSIDE(_ni) do { \
	IEEE80211_NODE_SAVEQ_LOCK_CHECK(_ni); \
	spin_lock(&(_ni)->ni_savedq.lock); \
} while (0)
#define IEEE80211_NODE_SAVEQ_UNLOCK_IRQ_INSIDE(_ni) do { \
	IEEE80211_NODE_SAVEQ_LOCK_ASSERT(_ni); \
	spin_unlock(&(_ni)->ni_savedq.lock); \
} while (0)
/* Unlock without closing the do{} (early-exit paths). */
#define IEEE80211_NODE_SAVEQ_UNLOCK_IRQ_EARLY(_ni) \
	IEEE80211_NODE_SAVEQ_LOCK_ASSERT(_ni); \
	spin_unlock_irqrestore(&(_ni)->ni_savedq.lock, __qlockflags);

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_NODE_SAVEQ_LOCK_ASSERT(_ni) \
	KASSERT(spin_is_locked(&(_ni)->ni_savedq.lock), \
		("node saveq not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define IEEE80211_NODE_SAVEQ_LOCK_CHECK(_ni) do { \
	if (spin_is_locked(&(_ni)->ni_savedq.lock)) \
		printk("%s:%d - about to block on node saveq lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define IEEE80211_NODE_SAVEQ_LOCK_CHECK(_ni)
#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#else
#define IEEE80211_NODE_SAVEQ_LOCK_ASSERT(_ni)
#define IEEE80211_NODE_SAVEQ_LOCK_CHECK(_ni)
#endif
/* caller MUST lock IEEE80211_NODE_SAVEQ */
#define IEEE80211_NODE_SAVEQ_DEQUEUE(_ni, _skb, _qlen) do { \
	_skb = __skb_dequeue(&(_ni)->ni_savedq); \
	(_qlen) = skb_queue_len(&(_ni)->ni_savedq); \
} while (0)
/* Append _skb to the saveq, storing its age (_age) relative to the previous
 * tail frame; _qlen receives the resulting queue length.
 * NB: restored the "} \" closing the if/else, which had been dropped. */
#define _IEEE80211_NODE_SAVEQ_ENQUEUE(_ni, _skb, _qlen, _age) do { \
	struct sk_buff *tail = skb_peek_tail(&(_ni)->ni_savedq); \
	if (tail != NULL) { \
		_age -= M_AGE_GET(tail); \
		__skb_queue_after(&(_ni)->ni_savedq, tail, _skb); \
	} else { \
		__skb_queue_head(&(_ni)->ni_savedq, _skb); \
	} \
	M_AGE_SET(_skb, _age); \
	(_qlen) = skb_queue_len(&(_ni)->ni_savedq); \
} while (0)
382 * Transmitted frames have the following information
383 * held in the sk_buff control buffer. This is used to
384 * communicate various inter-procedural state that needs
385 * to be associated with the frame for the duration of
386 * its existence.
388 * NB: sizeof(cb) == 48 and the vlan code grabs the first
389 * 8 bytes so we reserve/avoid it.
392 struct ieee80211_phy_params {
393 u_int8_t rate[4];
394 u_int8_t try[4];
396 u_int8_t power;
397 u_int32_t flags;
400 struct ieee80211_cb {
401 u_int8_t __reserved_vlan[8]; /* reserve for vlan tag info */
402 struct ieee80211_phy_params phy;
403 struct ieee80211_node *ni;
404 u_int32_t flags;
405 #define M_LINK0 0x01 /* frame needs WEP encryption */
406 #define M_FF 0x02 /* fast frame */
407 #define M_PWR_SAV 0x04 /* bypass power save handling */
408 #define M_UAPSD 0x08 /* frame flagged for u-apsd handling */
409 #define M_RAW 0x10
410 #ifdef IEEE80211_DEBUG_REFCNT
411 #define M_SKB_TRACKED 0x20
412 void (*next_destructor)(struct sk_buff *skb);
413 #endif
416 struct __assert {
417 int __ieee80211_cb_size[sizeof(struct ieee80211_cb) <= 48 ? 0 : -1];
struct ieee80211com;
struct ieee80211vap;

int ieee80211_load_module(const char *);

/* BSD-style byte-order helpers mapped onto the Linux primitives. */
#define le16toh(_x) le16_to_cpu(_x)
#define htole16(_x) cpu_to_le16(_x)
#define le32toh(_x) le32_to_cpu(_x)
#define htole32(_x) cpu_to_le32(_x)
#define be32toh(_x) be32_to_cpu(_x)
#define htobe32(_x) cpu_to_be32(_x)
433 * Linux has no equivalents to malloc types so null these out.
435 #define MALLOC_DEFINE(type, shortdesc, longdesc)
436 #define MALLOC_DECLARE(type)
439 * flags to malloc.
441 #define M_NOWAIT 0x0001 /* do not block */
442 #define M_WAITOK 0x0002 /* ok to block */
443 #define M_ZERO 0x0100 /* bzero the allocation */
445 static __inline void *
446 ieee80211_malloc(size_t size, int flags)
448 void *p = kmalloc(size, flags & M_NOWAIT ? GFP_ATOMIC : GFP_KERNEL);
449 if (p && (flags & M_ZERO))
450 memset(p, 0, size);
451 return p;
453 #define MALLOC(_ptr, cast, _size, _type, _flags) \
454 ((_ptr) = (cast)ieee80211_malloc(_size, _flags))
455 #define FREE(addr, type) kfree((addr))
/*
 * This unlikely to be popular but it dramatically reduces diffs.
 */
#define printf(...) printk(__VA_ARGS__)
struct ieee80211com;
extern void if_printf(struct net_device *, const char *, ...);
/*
 * Queue write-arounds and support routines.
 */
#ifdef IEEE80211_DEBUG_REFCNT
/* Under refcnt debugging, route allocations through the _debug variant
 * so the call site is recorded. */
#define ieee80211_getmgtframe(_ppfrm, _pktlen) \
	ieee80211_getmgtframe_debug(_ppfrm, _pktlen, __func__, __LINE__)
extern struct sk_buff * ieee80211_getmgtframe_debug(u_int8_t **frm, u_int pktlen,
	const char *func, int line);
#else
extern struct sk_buff * ieee80211_getmgtframe(u_int8_t **frm, u_int pktlen);
#endif

/* BSD-style ifqueue shims over sk_buff_head. */
#define IF_ENQUEUE(_q,_skb) skb_queue_tail(_q, _skb)
#define IF_DEQUEUE(_q,_skb) (_skb = skb_dequeue(_q))
#define _IF_QLEN(_q) skb_queue_len(_q)
#define IF_DRAIN(_q) skb_queue_drain(_q)
extern void skb_queue_drain(struct sk_buff_head *q);
/* Module use-count helpers.  _err is a statement executed when the module
 * reference cannot be taken (e.g. "return -ENODEV").
 * NB: restored the "}" closing the if body, which had been dropped. */
#ifndef __MOD_INC_USE_COUNT
#define _MOD_INC_USE(_m, _err) \
	if (!try_module_get(_m)) { \
		printk(KERN_WARNING "%s: try_module_get failed\n", \
			__func__); \
		_err; \
	}
#define _MOD_DEC_USE(_m) module_put(_m)
#else
#define _MOD_INC_USE(_m, _err) MOD_INC_USE_COUNT
#define _MOD_DEC_USE(_m) MOD_DEC_USE_COUNT
#endif
495 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
496 static __inline u_int64_t
497 get_jiffies_64(void)
499 return (u_int64_t) jiffies; /* XXX not right */
501 #endif
/* msecs_to_jiffies appeared in 2.6.7 and 2.4.29 */
#include <linux/delay.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)) || \
	LINUX_VERSION_CODE < KERNEL_VERSION(2,4,29)

/* The following definitions and inline functions are
 * copied from the kernel src, include/linux/jiffies.h */

#ifndef MSEC_PER_SEC
#define MSEC_PER_SEC (1000L)
#endif

#ifndef MAX_JIFFY_OFFSET
#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
#endif
520 static __inline unsigned int jiffies_to_msecs(const unsigned long j)
522 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
523 return (MSEC_PER_SEC / HZ) * j;
524 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
525 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
526 #else
527 return (j * MSEC_PER_SEC) / HZ;
528 #endif
531 static __inline unsigned long msecs_to_jiffies(const unsigned int m)
533 if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
534 return MAX_JIFFY_OFFSET;
535 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
536 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
537 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
538 return m * (HZ / MSEC_PER_SEC);
539 #else
540 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
541 #endif
544 #endif
546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7)
547 #include <linux/jiffies.h>
548 #endif
#ifndef CLONE_KERNEL
/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
#endif

#include <linux/mm.h>
#ifndef offset_in_page
#define offset_in_page(p) ((unsigned long) (p) & ~PAGE_MASK)
#endif

#ifndef module_put_and_exit
/* Compat: drop the module reference, then terminate the current thread. */
#define module_put_and_exit(code) do { \
	_MOD_DEC_USE(THIS_MODULE); \
	do_exit(code); \
} while (0)
#endif
/*
 * Linux uses __BIG_ENDIAN and __LITTLE_ENDIAN while BSD uses _foo
 * and an explicit _BYTE_ORDER. Sorry, BSD got there first--define
 * things in the BSD way...
 */
#undef _LITTLE_ENDIAN
#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
#undef _BIG_ENDIAN
#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
#include <asm/byteorder.h>
#if defined(__LITTLE_ENDIAN)
#define _BYTE_ORDER _LITTLE_ENDIAN
#elif defined(__BIG_ENDIAN)
#define _BYTE_ORDER _BIG_ENDIAN
#else
#error "Please fix asm/byteorder.h"
#endif
/*
 * Deal with the sysctl handler api changing.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8)
/* pre-2.6.8: handlers take no ppos argument */
#define IEEE80211_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
	f(ctl_table *ctl, int write, struct file *filp, \
	void __user *buffer, size_t *lenp)
#define IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
	proc_dointvec(ctl, write, filp, buffer, lenp)
#else
#define IEEE80211_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
	f(ctl_table *ctl, int write, struct file *filp, \
	void __user *buffer, size_t *lenp, loff_t *ppos)
#define IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
	proc_dointvec(ctl, write, filp, buffer, lenp, ppos)
#endif
/* proc/sysctl attach and detach entry points for a vap */
void ieee80211_virtfs_latevattach(struct ieee80211vap *);
void ieee80211_virtfs_vdetach(struct ieee80211vap *);
int ieee80211_proc_vcreate(struct ieee80211vap *, struct file_operations *,
	char *);
void ieee80211_proc_cleanup(struct ieee80211vap *);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define IEEE80211_VLAN_TAG_USED 1

#ifndef VLAN_GROUP_ARRAY_PART_LEN
/* Compat shim for kernels without vlan_group_set_device().
 * NB: no semicolon after "while (0)" — the caller supplies it; a trailing
 * semicolon here breaks "if (x) vlan_group_set_device(...); else" usage.
 * Macro arguments are parenthesized to expand safely. */
#define vlan_group_set_device(group, vid, dev) do { \
	(group)->vlan_devices[vid] = (dev); \
} while (0)
#endif

#else
#define IEEE80211_VLAN_TAG_USED 0
#endif
void ieee80211_vlan_vattach(struct ieee80211vap *);
void ieee80211_vlan_vdetach(struct ieee80211vap *);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/* 2.4 compat: net_device was plain kmalloc'd memory */
#define free_netdev(dev) kfree(dev)
#endif

/* ioctl attach/detach and vap creation entry points */
void ieee80211_ioctl_vattach(struct ieee80211vap *);
void ieee80211_ioctl_vdetach(struct ieee80211vap *);
struct ifreq;
int ieee80211_ioctl_create_vap(struct ieee80211com *, struct ifreq *,
	struct net_device *);
struct ieee80211vap *ieee80211_create_vap(struct ieee80211com *, char *,
	struct net_device *, int, int);
#endif /* _NET80211_IEEE80211_LINUX_H_ */