/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
#define SLAB_SKB 		/* Slabified skbuffs */
#define CHECKSUM_NONE 0		/* No checksum done yet; the stack must verify it */
#define CHECKSUM_HW 1		/* Device computed a checksum into skb->csum */
#define CHECKSUM_UNNECESSARY 2	/* Device already verified the checksum */
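
/*
 * A minimal sketch of how a driver reports checksum state on receive
 * (the hw_verified_csum flag is hypothetical, not part of this header):
 *
 *	if (hw_verified_csum)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;	stack will verify it
 */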

#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;		/* Next buffer in list				*/
	struct sk_buff	*prev;		/* Previous buffer in list			*/

	struct sk_buff_head *list;	/* List we are on				*/
	struct sock	*sk;		/* Socket we are owned by			*/
	struct timeval	stamp;		/* Time we arrived				*/
	struct net_device *dev;		/* Device we arrived on/are leaving by		*/
	struct net_device *rx_dev;	/* Device we received the packet on		*/

	/* Transport layer header */
	union
	{
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct spxhdr	*spxh;
		unsigned char	*raw;
	} h;

	/* Network layer header */
	union
	{
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		struct ipxhdr	*ipxh;
		unsigned char	*raw;
	} nh;

	/* Link layer header */
	union
	{
		struct ethhdr	*ethernet;
		unsigned char	*raw;
	} mac;

	struct dst_entry *dst;

	char		cb[48];

	unsigned int	len;		/* Length of actual data			*/
	unsigned int	csum;		/* Checksum					*/
	volatile char	used;		/* Data moved to user and not MSG_PEEK		*/
	unsigned char	is_clone,	/* We are a clone				*/
			cloned,		/* head may be cloned (check refcnt to be sure)	*/
			pkt_type,	/* Packet class					*/
			pkt_bridged,	/* Tracker for bridging				*/
			ip_summed;	/* Driver fed us an IP checksum			*/
	__u32		priority;	/* Packet queueing priority			*/
	atomic_t	users;		/* User count - see datagram.c,tcp.c		*/
	unsigned short	protocol;	/* Packet protocol from driver.			*/
	unsigned short	security;	/* Security level of packet			*/
	unsigned int	truesize;	/* Buffer size					*/

	unsigned char	*head;		/* Head of buffer				*/
	unsigned char	*data;		/* Data head pointer				*/
	unsigned char	*tail;		/* Tail pointer					*/
	unsigned char	*end;		/* End pointer					*/
	void		(*destructor)(struct sk_buff *);	/* Destruct function	*/
#ifdef CONFIG_NETFILTER
	/* Can be used for communication between hooks. */
	unsigned long	nfmark;
	/* Reason for doing this to the packet (see netfilter.h) */
	__u32		nfreason;
	/* Cache info */
	__u32		nfcache;
#ifdef CONFIG_NETFILTER_DEBUG
	unsigned int	nf_debug;
#endif
#endif /* CONFIG_NETFILTER */

#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapeclock;	/* Time it should go out */
	__u32		shapelen;	/* Frame length in clocks */
	__u32		shapestamp;	/* Stamp for shaper */
	__u16		shapepend;	/* Pending */
#endif

#if defined(CONFIG_HIPPI)
	union {
		__u32	ifield;
	} private;
#endif
};
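
/*
 * A minimal sketch of how the header unions are used, assuming a buffer
 * whose network and transport pointers have already been set up by the
 * stack (the local variables are illustrative only):
 *
 *	struct iphdr *iph = skb->nh.iph;
 *	struct tcphdr *th = skb->h.th;
 *	__u32 saddr = iph->saddr;	source address from the IP header
 *	__u16 dport = th->dest;		destination port from the TCP header
 */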

/* These are just the default values. This is run time configurable.
 * FIXME: Probably the config option should go away. -- erics
 */
#ifdef CONFIG_SKB_LARGE
#define SK_WMEM_MAX	65535
#define SK_RMEM_MAX	65535
#else
#define SK_WMEM_MAX	32767
#define SK_RMEM_MAX	32767
#endif

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/malloc.h>

#include <asm/system.h>

extern void		__kfree_skb(struct sk_buff *skb);
extern void		skb_queue_head_init(struct sk_buff_head *list);
extern void		skb_queue_head(struct sk_buff_head *list, struct sk_buff *buf);
extern void		skb_queue_tail(struct sk_buff_head *list, struct sk_buff *buf);
extern struct sk_buff *	skb_dequeue(struct sk_buff_head *list);
extern void		skb_insert(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_append(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_unlink(struct sk_buff *buf);
extern __u32		skb_queue_len(struct sk_buff_head *list);
extern struct sk_buff *	skb_peek_copy(struct sk_buff_head *list);
extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
extern struct sk_buff *	dev_alloc_skb(unsigned int size);
extern void		kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
#define dev_kfree_skb(a)	kfree_skb(a)
extern unsigned char *	skb_put(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_push(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_pull(struct sk_buff *skb, unsigned int len);
extern int		skb_headroom(struct sk_buff *skb);
extern int		skb_tailroom(struct sk_buff *skb);
extern void		skb_reserve(struct sk_buff *skb, unsigned int len);
extern void		skb_trim(struct sk_buff *skb, unsigned int len);
extern void		skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void		skb_under_panic(struct sk_buff *skb, int len, void *here);

/* Internal */
extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
{
	return (atomic_t *)(skb->end);
}

extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
{
	return (list->next == (struct sk_buff *) list);
}

/* Drop a reference; the buffer is freed once the last user is gone. */
extern __inline__ void kfree_skb(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}

/* Use this if you didn't touch the skb state [for fast switching] */
extern __inline__ void kfree_skb_fast(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		kfree_skbmem(skb);
}

extern __inline__ int skb_cloned(struct sk_buff *skb)
{
	return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}

extern __inline__ int skb_shared(struct sk_buff *skb)
{
	return (atomic_read(&skb->users) != 1);
}

/*
 *	If the buffer is shared, clone it and drop our reference so the
 *	caller ends up with a private sk_buff. Returns NULL (with the
 *	original reference already dropped) if the clone fails.
 */
extern __inline__ struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
	if (skb_shared(skb)) {
		struct sk_buff *nskb;
		nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		return nskb;
	}
	return skb;
}
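
/*
 * A sketch of typical use in a receive handler (the function name is
 * hypothetical). Note that the result may be a clone, so the data is
 * still shared; use skb_unshare() or skb_cow() before writing into it:
 *
 *	static int my_proto_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (skb == NULL)
 *			return 0;	clone failed, reference already dropped
 *		...	skb state (pointers, fields) is now private	...
 *	}
 */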

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */
extern __inline__ struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
	struct sk_buff *nskb;
	if(!skb_cloned(skb))
		return skb;
	nskb=skb_copy(skb, pri);
	kfree_skb(skb);	/* Free our shared copy */
	return nskb;
}
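
/*
 * A sketch of use before modifying payload in place (assumption: the
 * caller owns the one reference it passes in):
 *
 *	skb = skb_unshare(skb, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return;		copy failed, original already freed
 *	...	skb->data may now be written safely	...
 */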

/*
 *	Peek an sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. On a system that
 *	takes interrupts you must cli(), peek the buffer, copy the
 *	data out and then sti().
 */
extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
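
/*
 * A sketch of a safe peek under the queue lock, which prevents another
 * context from dequeueing (and freeing) the buffer while we look at it:
 *
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb = skb_peek(list);
 *	if (skb != NULL)
 *		...	copy out whatever is needed	...
 *	spin_unlock_irqrestore(&list->lock, flags);
 */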

/*
 *	Return the length of an sk_buff queue
 */
extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
{
	return(list_->qlen);
}

extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
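
/*
 * A sketch of setting up a queue; an sk_buff_head must be initialised
 * (so that it points at itself as an empty ring) before any other
 * operation is attempted on it:
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	...	rxq may now be used from any context	...
 */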

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/*
 *	Insert an sk_buff at the end of a list.
 */
extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/*
 *	Remove an sk_buff from the head of a list.
 */
extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = NULL;
		result->prev = NULL;
		result->list = NULL;
	}
	return result;
}

extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
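
/*
 * A sketch of a producer/consumer pair using the locking variants, which
 * are safe between interrupt and process context (the function names are
 * hypothetical):
 *
 *	void produce(struct sk_buff_head *q, struct sk_buff *skb)
 *	{
 *		skb_queue_tail(q, skb);		takes q->lock, disables irqs
 *	}
 *
 *	void drain(struct sk_buff_head *q)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(q)) != NULL)
 *			kfree_skb(skb);
 *	}
 */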

/*
 *	Insert a packet on a list.
 */
extern __inline__ void __skb_insert(struct sk_buff *newsk,
	struct sk_buff *prev, struct sk_buff *next,
	struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}

/*
 *	Place a packet before a given packet in a list
 */
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	Place a packet after a given packet in a list.
 */
extern __inline__ void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}

extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	Remove an sk_buff from a list. _Must_ be called atomically, and with
 *	the list known.
 */
extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}

/*
 *	Remove an sk_buff from its list. Works even without knowing the list it
 *	is sitting on, which can be handy at times. It also means that THE LIST
 *	MUST EXIST when you unlink. Thus a list must have its contents unlinked
 *	_FIRST_, before the list itself is destroyed.
 */
extern __inline__ void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if(list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if(skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}
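
/*
 * A sketch of abandoning a buffer that may still be parked on some queue,
 * for example from a timeout handler (skb_unlink() is harmless when the
 * skb is not on a list):
 *
 *	skb_unlink(skb);
 *	kfree_skb(skb);
 */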

/* XXX: more streamlined implementation */
extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/*
 *	Add data to an sk_buff
 */
extern __inline__ unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp=skb->tail;
	skb->tail+=len;
	skb->len+=len;
	return tmp;
}

extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp=skb->tail;
	skb->tail+=len;
	skb->len+=len;
	if(skb->tail>skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}

extern __inline__ unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data-=len;
	skb->len+=len;
	return skb->data;
}

extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data-=len;
	skb->len+=len;
	if(skb->data<skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}

extern __inline__ unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len-=len;
	return skb->data+=len;
}

extern __inline__ unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb,len);
}
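
/*
 * A sketch of the usual way a packet is built: reserve room for headers,
 * append the payload with skb_put(), then prepend headers back to front
 * with skb_push(). MAX_HDR, payload and len are placeholders, not part
 * of this header:
 *
 *	struct sk_buff *skb = alloc_skb(len + MAX_HDR, GFP_ATOMIC);
 *
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, MAX_HDR);		leave space for the headers
 *	memcpy(skb_put(skb, len), payload, len);
 *	skb->h.raw = skb_push(skb, sizeof(struct tcphdr));
 *	...	fill in the header, pass the skb down	...
 */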

extern __inline__ int skb_headroom(struct sk_buff *skb)
{
	return skb->data-skb->head;
}

extern __inline__ int skb_tailroom(struct sk_buff *skb)
{
	return skb->end-skb->tail;
}

/* Reserve header room. Only legal while the buffer is still empty. */
extern __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data+=len;
	skb->tail+=len;
}

extern __inline__ void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = skb->data+len;
}

extern __inline__ void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}

/* Detach the buffer from its owning socket, running the destructor. */
extern __inline__ void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb=skb_dequeue(list))!=NULL)
		kfree_skb(skb);
}

extern __inline__ void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb=__skb_dequeue(list))!=NULL)
		kfree_skb(skb);
}

/*
 *	Allocate a buffer for a device, reserving 16 bytes of headroom so
 *	that a 14 byte ethernet header leaves the IP header longword aligned.
 */
extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
{
	struct sk_buff *skb;

	skb = alloc_skb(length+16, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb,16);
	return skb;
}
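
/*
 * A sketch of a driver receive path (pkt_len, rx_buf and the surrounding
 * device details are hypothetical); eth_type_trans() and netif_rx() live
 * elsewhere in the stack:
 *
 *	struct sk_buff *skb = dev_alloc_skb(pkt_len);
 *
 *	if (skb == NULL) {
 *		...	drop the frame, count it as rx_dropped	...
 *	} else {
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->dev = dev;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */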

/*
 *	Make sure the buffer has at least "headroom" bytes of headroom and
 *	is private to us. May free the original skb and return NULL if the
 *	reallocation fails.
 */
extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	headroom = (headroom+15)&~15;

	if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		kfree_skb(skb);
		skb = skb2;
	}
	return skb;
}
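
/*
 * A sketch of use before rewriting headers in place (as forwarding code
 * does when it decrements the TTL, for instance):
 *
 *	skb = skb_cow(skb, dev->hard_header_len);
 *	if (skb == NULL)
 *		return;		reallocation failed, old skb already freed
 *	...	header area is now private and large enough	...
 */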

extern struct sk_buff *	skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(struct sk_buff *from, int offset, char *to, int size);
extern int		skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to, int size);
extern void		skb_free_datagram(struct sock *sk, struct sk_buff *skb);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */