/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <asm/spinlock.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
#define SLAB_SKB		/* Slabified skbuffs */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;			/* Next buffer in list			*/
	struct sk_buff	*prev;			/* Previous buffer in list		*/

	struct sk_buff_head *list;		/* List we are on			*/
	struct sock	*sk;			/* Socket we are owned by		*/
	struct timeval	stamp;			/* Time we arrived			*/
	struct device	*dev;			/* Device we arrived on/are leaving by	*/

	/* Transport layer header */
	union
	{
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct spxhdr	*spxh;
		unsigned char	*raw;
	} h;

	/* Network layer header */
	union
	{
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		struct ipxhdr	*ipxh;
		unsigned char	*raw;
	} nh;

	/* Link layer header */
	union
	{
		struct ethhdr	*ethernet;
		unsigned char	*raw;
	} mac;

	struct dst_entry *dst;

	char		cb[48];

	unsigned int	len;			/* Length of actual data		*/
	unsigned int	csum;			/* Checksum				*/
	volatile char	used;			/* Data moved to user and not MSG_PEEK	*/
	unsigned char	is_clone,		/* We are a clone			*/
			cloned,			/* head may be cloned (check refcnt to be sure) */
			pkt_type,		/* Packet class				*/
			pkt_bridged,		/* Tracker for bridging			*/
			ip_summed;		/* Driver fed us an IP checksum		*/
	__u32		priority;		/* Packet queueing priority		*/
	atomic_t	users;			/* User count - see datagram.c,tcp.c	*/
	unsigned short	protocol;		/* Packet protocol from driver.		*/
	unsigned short	security;		/* Security level of packet		*/
	unsigned int	truesize;		/* Buffer size				*/

	unsigned char	*head;			/* Head of buffer			*/
	unsigned char	*data;			/* Data head pointer			*/
	unsigned char	*tail;			/* Tail pointer				*/
	unsigned char	*end;			/* End pointer				*/
	void		(*destructor)(struct sk_buff *);	/* Destruct function	*/
#ifdef CONFIG_IP_FIREWALL
	__u32		fwmark;			/* Label made by fwchains, used by pktsched */
#endif
#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
	__u32		shapelatency;		/* Latency on frame			*/
	__u32		shapeclock;		/* Time it should go out		*/
	__u32		shapelen;		/* Frame length in clocks		*/
	__u32		shapestamp;		/* Stamp for shaper			*/
	__u16		shapepend;		/* Pending				*/
#endif

#if defined(CONFIG_HIPPI)
	union {
		__u32	ifield;
	} private;
#endif
};

/* These are just the default values. This is run time configurable.
 * FIXME: Probably the config option should go away. -- erics
 */
#ifdef CONFIG_SKB_LARGE
#define SK_WMEM_MAX	65535
#define SK_RMEM_MAX	65535
#else
#define SK_WMEM_MAX	32767
#define SK_RMEM_MAX	32767
#endif

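/*
 *	Illustrative note: at run time these limits are exposed through
 *	sysctl, e.g.
 *
 *		echo 262144 > /proc/sys/net/core/wmem_max
 *		echo 262144 > /proc/sys/net/core/rmem_max
 *
 *	so SK_WMEM_MAX/SK_RMEM_MAX only provide the boot-time defaults.
 */
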
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/malloc.h>

#include <asm/system.h>

extern void		__kfree_skb(struct sk_buff *skb);
extern void		skb_queue_head_init(struct sk_buff_head *list);
extern void		skb_queue_head(struct sk_buff_head *list, struct sk_buff *buf);
extern void		skb_queue_tail(struct sk_buff_head *list, struct sk_buff *buf);
extern struct sk_buff *	skb_dequeue(struct sk_buff_head *list);
extern void		skb_insert(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_append(struct sk_buff *old, struct sk_buff *newsk);
extern void		skb_unlink(struct sk_buff *buf);
extern __u32		skb_queue_len(struct sk_buff_head *list);
extern struct sk_buff *	skb_peek_copy(struct sk_buff_head *list);
extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
extern struct sk_buff *	dev_alloc_skb(unsigned int size);
extern void		kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
#define dev_kfree_skb(a)	kfree_skb(a)
extern unsigned char *	skb_put(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_push(struct sk_buff *skb, unsigned int len);
extern unsigned char *	skb_pull(struct sk_buff *skb, unsigned int len);
extern int		skb_headroom(struct sk_buff *skb);
extern int		skb_tailroom(struct sk_buff *skb);
extern void		skb_reserve(struct sk_buff *skb, unsigned int len);
extern void		skb_trim(struct sk_buff *skb, unsigned int len);
extern void		skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void		skb_under_panic(struct sk_buff *skb, int len, void *here);

/* Internal */
extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
{
	return (atomic_t *)(skb->end);
}

extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
{
	return (list->next == (struct sk_buff *) list);
}

extern __inline__ void kfree_skb(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}

/* Use this if you didn't touch the skb state [for fast switching] */
extern __inline__ void kfree_skb_fast(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users))
		kfree_skbmem(skb);
}

extern __inline__ int skb_cloned(struct sk_buff *skb)
{
	return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}

extern __inline__ int skb_shared(struct sk_buff *skb)
{
	return (atomic_read(&skb->users) != 1);
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

extern __inline__ struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
	struct sk_buff *nskb;
	if (!skb_cloned(skb))
		return skb;
	nskb = skb_copy(skb, pri);
	kfree_skb(skb);		/* Free our shared copy */
	return nskb;
}

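/*
 *	Illustrative sketch: a path that is about to modify packet data
 *	calls skb_unshare() first, e.g.
 *
 *		skb = skb_unshare(skb, GFP_ATOMIC);
 *		if (skb == NULL)
 *			return;		(copy failed; the original is gone)
 *		... it is now safe to write into skb->data ...
 *
 *	On failure skb_copy() returns NULL and the shared buffer has
 *	already been dropped by the kfree_skb() above, so the caller must
 *	not touch the old pointer again.
 */
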
/*
 *	Peek an sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. On an interrupt-driven
 *	system: cli(), peek the buffer, copy the data, then sti().
 */

extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

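/*
 *	Illustrative sketch: a peeked buffer stays on the list, so the
 *	caller normally holds the queue lock (or disables interrupts) for
 *	the whole peek-and-copy, e.g. ("queue" is a placeholder for the
 *	caller's own sk_buff_head):
 *
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&queue->lock, flags);
 *		skb = skb_peek(queue);
 *		if (skb != NULL)
 *			... copy whatever is needed while the lock is held ...
 *		spin_unlock_irqrestore(&queue->lock, flags);
 */
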
/*
 *	Return the length of an sk_buff queue
 */

extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
{
	return(list_->qlen);
}

extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/*
 *	Insert an sk_buff at the end of a list.
 */

extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}

extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

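/*
 *	Illustrative sketch: a typical producer/consumer FIFO built on
 *	these helpers ("rxq" and process() are placeholders):
 *
 *		struct sk_buff_head rxq;
 *		struct sk_buff *skb;
 *
 *		skb_queue_head_init(&rxq);
 *
 *		skb_queue_tail(&rxq, skb);		(producer, e.g. an IRQ handler)
 *
 *		while ((skb = skb_dequeue(&rxq)) != NULL)
 *			process(skb);			(consumer)
 *
 *	The locked skb_queue_*()/skb_dequeue() forms are safe against
 *	interrupts; the __skb_*() forms leave the locking to the caller.
 */
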
/*
 *	Remove an sk_buff from a list.
 */

extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = NULL;
		result->prev = NULL;
		result->list = NULL;
	}
	return result;
}

extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/*
 *	Insert a packet on a list.
 */

extern __inline__ void __skb_insert(struct sk_buff *newsk,
	struct sk_buff *prev, struct sk_buff *next,
	struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}

/*
 *	Place a packet before a given packet in a list
 */
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	Place a packet after a given packet in a list.
 */

extern __inline__ void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}

extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	remove sk_buff from list. _Must_ be called atomically, and with
 *	the list known.
 */
extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}

/*
 *	Remove an sk_buff from its list. Works even without knowing the list it
 *	is sitting on, which can be handy at times. It also means that THE LIST
 *	MUST EXIST when you unlink. Thus a list must have its contents unlinked
 *	_FIRST_.
 */
extern __inline__ void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}

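/*
 *	Illustrative sketch: because skb_unlink() dereferences skb->list,
 *	tear a queue down by emptying it first and only then letting the
 *	sk_buff_head itself go away ("queue" is a placeholder):
 *
 *		skb_queue_purge(&queue);	(unlinks and frees every skb)
 *		... only now may the sk_buff_head be freed ...
 */
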
/* XXX: more streamlined implementation */
extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/*
 *	Add data to an sk_buff
 */

extern __inline__ unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	skb->tail += len;
	skb->len += len;
	return tmp;
}

extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	skb->tail += len;
	skb->len += len;
	if (skb->tail > skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}

extern __inline__ unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (skb->data < skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}

extern __inline__ char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	return skb->data += len;
}

extern __inline__ unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb, len);
}

extern __inline__ int skb_headroom(struct sk_buff *skb)
{
	return skb->data - skb->head;
}

extern __inline__ int skb_tailroom(struct sk_buff *skb)
{
	return skb->end - skb->tail;
}

extern __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}

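/*
 *	Illustrative sketch: an outgoing packet is usually built by
 *	reserving room for the headers, filling the payload with
 *	skb_put() and then prepending the headers with skb_push()
 *	(hdr_len, data_len, data and hdr are placeholders; NULL checks
 *	omitted):
 *
 *		skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
 *		skb_reserve(skb, hdr_len);		(leave headroom)
 *		memcpy(skb_put(skb, data_len), data, data_len);
 *		hdr = skb_push(skb, hdr_len);		(now fill in the header)
 */
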
extern __inline__ void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = skb->data + len;
}

extern __inline__ void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}

extern __inline__ void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + 16, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb, 16);
	return skb;
}

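/*
 *	Illustrative sketch: a driver receive path allocates with
 *	dev_alloc_skb() so 16 bytes of headroom are already reserved,
 *	copies the frame in and hands the buffer up the stack
 *	(pkt_len, rx_buf and dev are placeholders):
 *
 *		skb = dev_alloc_skb(pkt_len + 2);
 *		if (skb) {
 *			skb_reserve(skb, 2);		(align the IP header)
 *			memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *			skb->dev = dev;
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx(skb);
 *		}
 */
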
extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	headroom = (headroom + 15) & ~15;

	if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		kfree_skb(skb);
		skb = skb2;
	}
	return skb;
}

extern struct sk_buff *	skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(struct sk_buff *from, int offset, char *to, int size);
extern int		skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to, int size);
extern void		skb_free_datagram(struct sock *sk, struct sk_buff *skb);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */