/*
 * Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Alan Cox, <gw4pts@gw4pts.ampr.org>
 *	Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
17 #include <linux/config.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/time.h>
22 #include <asm/atomic.h>
23 #include <asm/types.h>
24 #include <asm/spinlock.h>
26 #define HAVE_ALLOC_SKB /* For the drivers to know */
27 #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
28 #define SLAB_SKB /* Slabified skbuffs */
30 #define CHECKSUM_NONE 0
32 #define CHECKSUM_UNNECESSARY 2
35 /* These two members must be first. */
36 struct sk_buff
* next
;
37 struct sk_buff
* prev
;
44 /* These two members must be first. */
45 struct sk_buff
* next
; /* Next buffer in list */
46 struct sk_buff
* prev
; /* Previous buffer in list */
48 struct sk_buff_head
* list
; /* List we are on */
49 struct sock
*sk
; /* Socket we are owned by */
50 struct timeval stamp
; /* Time we arrived */
51 struct device
*dev
; /* Device we arrived on/are leaving by */
53 /* Transport layer header */
58 struct icmphdr
*icmph
;
59 struct igmphdr
*igmph
;
65 /* Network layer header */
69 struct ipv6hdr
*ipv6h
;
75 /* Link layer header */
78 struct ethhdr
*ethernet
;
82 struct dst_entry
*dst
;
86 unsigned int len
; /* Length of actual data */
87 unsigned int csum
; /* Checksum */
88 volatile char used
; /* Data moved to user and not MSG_PEEK */
89 unsigned char is_clone
, /* We are a clone */
90 cloned
, /* head may be cloned (check refcnt to be sure). */
91 pkt_type
, /* Packet class */
92 pkt_bridged
, /* Tracker for bridging */
93 ip_summed
; /* Driver fed us an IP checksum */
94 __u32 priority
; /* Packet queueing priority */
95 atomic_t users
; /* User count - see datagram.c,tcp.c */
96 unsigned short protocol
; /* Packet protocol from driver. */
97 unsigned short security
; /* Security level of packet */
98 unsigned int truesize
; /* Buffer size */
100 unsigned char *head
; /* Head of buffer */
101 unsigned char *data
; /* Data head pointer */
102 unsigned char *tail
; /* Tail pointer */
103 unsigned char *end
; /* End pointer */
104 void (*destructor
)(struct sk_buff
*); /* Destruct function */
105 #ifdef CONFIG_IP_FIREWALL
106 __u32 fwmark
; /* Label made by fwchains, used by pktsched */
108 #if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
109 __u32 shapelatency
; /* Latency on frame */
110 __u32 shapeclock
; /* Time it should go out */
111 __u32 shapelen
; /* Frame length in clocks */
112 __u32 shapestamp
; /* Stamp for shaper */
113 __u16 shapepend
; /* Pending */
116 #if defined(CONFIG_HIPPI)
123 /* These are just the default values. This is run time configurable.
124 * FIXME: Probably the config option should go away. -- erics
126 #ifdef CONFIG_SKB_LARGE
127 #define SK_WMEM_MAX 65535
128 #define SK_RMEM_MAX 65535
130 #define SK_WMEM_MAX 32767
131 #define SK_RMEM_MAX 32767
136 * Handling routines are only of interest to the kernel
138 #include <linux/malloc.h>
140 #include <asm/system.h>
142 extern void __kfree_skb(struct sk_buff
*skb
);
143 extern void skb_queue_head_init(struct sk_buff_head
*list
);
144 extern void skb_queue_head(struct sk_buff_head
*list
,struct sk_buff
*buf
);
145 extern void skb_queue_tail(struct sk_buff_head
*list
,struct sk_buff
*buf
);
146 extern struct sk_buff
* skb_dequeue(struct sk_buff_head
*list
);
147 extern void skb_insert(struct sk_buff
*old
,struct sk_buff
*newsk
);
148 extern void skb_append(struct sk_buff
*old
,struct sk_buff
*newsk
);
149 extern void skb_unlink(struct sk_buff
*buf
);
150 extern __u32
skb_queue_len(struct sk_buff_head
*list
);
151 extern struct sk_buff
* skb_peek_copy(struct sk_buff_head
*list
);
152 extern struct sk_buff
* alloc_skb(unsigned int size
, int priority
);
153 extern struct sk_buff
* dev_alloc_skb(unsigned int size
);
154 extern void kfree_skbmem(struct sk_buff
*skb
);
155 extern struct sk_buff
* skb_clone(struct sk_buff
*skb
, int priority
);
156 extern struct sk_buff
* skb_copy(struct sk_buff
*skb
, int priority
);
157 extern struct sk_buff
* skb_realloc_headroom(struct sk_buff
*skb
, int newheadroom
);
158 #define dev_kfree_skb(a) kfree_skb(a)
159 extern unsigned char * skb_put(struct sk_buff
*skb
, unsigned int len
);
160 extern unsigned char * skb_push(struct sk_buff
*skb
, unsigned int len
);
161 extern unsigned char * skb_pull(struct sk_buff
*skb
, unsigned int len
);
162 extern int skb_headroom(struct sk_buff
*skb
);
163 extern int skb_tailroom(struct sk_buff
*skb
);
164 extern void skb_reserve(struct sk_buff
*skb
, unsigned int len
);
165 extern void skb_trim(struct sk_buff
*skb
, unsigned int len
);
166 extern void skb_over_panic(struct sk_buff
*skb
, int len
, void *here
);
167 extern void skb_under_panic(struct sk_buff
*skb
, int len
, void *here
);
170 extern __inline__ atomic_t
*skb_datarefp(struct sk_buff
*skb
)
172 return (atomic_t
*)(skb
->end
);
175 extern __inline__
int skb_queue_empty(struct sk_buff_head
*list
)
177 return (list
->next
== (struct sk_buff
*) list
);
180 extern __inline__
void kfree_skb(struct sk_buff
*skb
)
182 if (atomic_dec_and_test(&skb
->users
))
186 /* Use this if you didn't touch the skb state [for fast switching] */
187 extern __inline__
void kfree_skb_fast(struct sk_buff
*skb
)
189 if (atomic_dec_and_test(&skb
->users
))
193 extern __inline__
int skb_cloned(struct sk_buff
*skb
)
195 return skb
->cloned
&& atomic_read(skb_datarefp(skb
)) != 1;
198 extern __inline__
int skb_shared(struct sk_buff
*skb
)
200 return (atomic_read(&skb
->users
) != 1);
204 * Copy shared buffers into a new sk_buff. We effectively do COW on
205 * packets to handle cases where we have a local reader and forward
206 * and a couple of other messy ones. The normal one is tcpdumping
207 * a packet thats being forwarded.
210 extern __inline__
struct sk_buff
*skb_unshare(struct sk_buff
*skb
, int pri
)
212 struct sk_buff
*nskb
;
215 nskb
=skb_copy(skb
, pri
);
216 kfree_skb(skb
); /* Free our shared copy */
221 * Peek an sk_buff. Unlike most other operations you _MUST_
222 * be careful with this one. A peek leaves the buffer on the
223 * list and someone else may run off with it. For an interrupt
224 * type system cli() peek the buffer copy the data and sti();
227 extern __inline__
struct sk_buff
*skb_peek(struct sk_buff_head
*list_
)
229 struct sk_buff
*list
= ((struct sk_buff
*)list_
)->next
;
230 if (list
== (struct sk_buff
*)list_
)
235 extern __inline__
struct sk_buff
*skb_peek_tail(struct sk_buff_head
*list_
)
237 struct sk_buff
*list
= ((struct sk_buff
*)list_
)->prev
;
238 if (list
== (struct sk_buff
*)list_
)
244 * Return the length of an sk_buff queue
247 extern __inline__ __u32
skb_queue_len(struct sk_buff_head
*list_
)
252 extern __inline__
void skb_queue_head_init(struct sk_buff_head
*list
)
254 spin_lock_init(&list
->lock
);
255 list
->prev
= (struct sk_buff
*)list
;
256 list
->next
= (struct sk_buff
*)list
;
261 * Insert an sk_buff at the start of a list.
263 * The "__skb_xxxx()" functions are the non-atomic ones that
264 * can only be called with interrupts disabled.
267 extern __inline__
void __skb_queue_head(struct sk_buff_head
*list
, struct sk_buff
*newsk
)
269 struct sk_buff
*prev
, *next
;
273 prev
= (struct sk_buff
*)list
;
281 extern __inline__
void skb_queue_head(struct sk_buff_head
*list
, struct sk_buff
*newsk
)
285 spin_lock_irqsave(&list
->lock
, flags
);
286 __skb_queue_head(list
, newsk
);
287 spin_unlock_irqrestore(&list
->lock
, flags
);
291 * Insert an sk_buff at the end of a list.
294 extern __inline__
void __skb_queue_tail(struct sk_buff_head
*list
, struct sk_buff
*newsk
)
296 struct sk_buff
*prev
, *next
;
300 next
= (struct sk_buff
*)list
;
308 extern __inline__
void skb_queue_tail(struct sk_buff_head
*list
, struct sk_buff
*newsk
)
312 spin_lock_irqsave(&list
->lock
, flags
);
313 __skb_queue_tail(list
, newsk
);
314 spin_unlock_irqrestore(&list
->lock
, flags
);
318 * Remove an sk_buff from a list.
321 extern __inline__
struct sk_buff
*__skb_dequeue(struct sk_buff_head
*list
)
323 struct sk_buff
*next
, *prev
, *result
;
325 prev
= (struct sk_buff
*) list
;
341 extern __inline__
struct sk_buff
*skb_dequeue(struct sk_buff_head
*list
)
344 struct sk_buff
*result
;
346 spin_lock_irqsave(&list
->lock
, flags
);
347 result
= __skb_dequeue(list
);
348 spin_unlock_irqrestore(&list
->lock
, flags
);
353 * Insert a packet on a list.
356 extern __inline__
void __skb_insert(struct sk_buff
*newsk
,
357 struct sk_buff
* prev
, struct sk_buff
*next
,
358 struct sk_buff_head
* list
)
369 * Place a packet before a given packet in a list
371 extern __inline__
void skb_insert(struct sk_buff
*old
, struct sk_buff
*newsk
)
375 spin_lock_irqsave(&old
->list
->lock
, flags
);
376 __skb_insert(newsk
, old
->prev
, old
, old
->list
);
377 spin_unlock_irqrestore(&old
->list
->lock
, flags
);
381 * Place a packet after a given packet in a list.
384 extern __inline__
void __skb_append(struct sk_buff
*old
, struct sk_buff
*newsk
)
386 __skb_insert(newsk
, old
, old
->next
, old
->list
);
389 extern __inline__
void skb_append(struct sk_buff
*old
, struct sk_buff
*newsk
)
393 spin_lock_irqsave(&old
->list
->lock
, flags
);
394 __skb_append(old
, newsk
);
395 spin_unlock_irqrestore(&old
->list
->lock
, flags
);
399 * remove sk_buff from list. _Must_ be called atomically, and with
402 extern __inline__
void __skb_unlink(struct sk_buff
*skb
, struct sk_buff_head
*list
)
404 struct sk_buff
* next
, * prev
;
417 * Remove an sk_buff from its list. Works even without knowing the list it
418 * is sitting on, which can be handy at times. It also means that THE LIST
419 * MUST EXIST when you unlink. Thus a list must have its contents unlinked
423 extern __inline__
void skb_unlink(struct sk_buff
*skb
)
425 struct sk_buff_head
*list
= skb
->list
;
430 spin_lock_irqsave(&list
->lock
, flags
);
431 if(skb
->list
== list
)
432 __skb_unlink(skb
, skb
->list
);
433 spin_unlock_irqrestore(&list
->lock
, flags
);
437 /* XXX: more streamlined implementation */
438 extern __inline__
struct sk_buff
*__skb_dequeue_tail(struct sk_buff_head
*list
)
440 struct sk_buff
*skb
= skb_peek_tail(list
);
442 __skb_unlink(skb
, list
);
446 extern __inline__
struct sk_buff
*skb_dequeue_tail(struct sk_buff_head
*list
)
449 struct sk_buff
*result
;
451 spin_lock_irqsave(&list
->lock
, flags
);
452 result
= __skb_dequeue_tail(list
);
453 spin_unlock_irqrestore(&list
->lock
, flags
);
458 * Add data to an sk_buff
461 extern __inline__
unsigned char *__skb_put(struct sk_buff
*skb
, unsigned int len
)
463 unsigned char *tmp
=skb
->tail
;
469 extern __inline__
unsigned char *skb_put(struct sk_buff
*skb
, unsigned int len
)
471 unsigned char *tmp
=skb
->tail
;
474 if(skb
->tail
>skb
->end
) {
475 skb_over_panic(skb
, len
, current_text_addr());
480 extern __inline__
unsigned char *__skb_push(struct sk_buff
*skb
, unsigned int len
)
487 extern __inline__
unsigned char *skb_push(struct sk_buff
*skb
, unsigned int len
)
491 if(skb
->data
<skb
->head
) {
492 skb_under_panic(skb
, len
, current_text_addr());
497 extern __inline__
char *__skb_pull(struct sk_buff
*skb
, unsigned int len
)
500 return skb
->data
+=len
;
503 extern __inline__
unsigned char * skb_pull(struct sk_buff
*skb
, unsigned int len
)
507 return __skb_pull(skb
,len
);
510 extern __inline__
int skb_headroom(struct sk_buff
*skb
)
512 return skb
->data
-skb
->head
;
515 extern __inline__
int skb_tailroom(struct sk_buff
*skb
)
517 return skb
->end
-skb
->tail
;
520 extern __inline__
void skb_reserve(struct sk_buff
*skb
, unsigned int len
)
526 extern __inline__
void __skb_trim(struct sk_buff
*skb
, unsigned int len
)
529 skb
->tail
= skb
->data
+len
;
532 extern __inline__
void skb_trim(struct sk_buff
*skb
, unsigned int len
)
534 if (skb
->len
> len
) {
535 __skb_trim(skb
, len
);
539 extern __inline__
void skb_orphan(struct sk_buff
*skb
)
542 skb
->destructor(skb
);
543 skb
->destructor
= NULL
;
547 extern __inline__
void skb_queue_purge(struct sk_buff_head
*list
)
550 while ((skb
=skb_dequeue(list
))!=NULL
)
554 extern __inline__
struct sk_buff
*dev_alloc_skb(unsigned int length
)
558 skb
= alloc_skb(length
+16, GFP_ATOMIC
);
564 extern __inline__
struct sk_buff
*
565 skb_cow(struct sk_buff
*skb
, unsigned int headroom
)
567 headroom
= (headroom
+15)&~15;
569 if ((unsigned)skb_headroom(skb
) < headroom
|| skb_cloned(skb
)) {
570 struct sk_buff
*skb2
= skb_realloc_headroom(skb
, headroom
);
577 extern struct sk_buff
* skb_recv_datagram(struct sock
*sk
,unsigned flags
,int noblock
, int *err
);
578 extern unsigned int datagram_poll(struct file
*file
, struct socket
*sock
, struct poll_table_struct
*wait
);
579 extern int skb_copy_datagram(struct sk_buff
*from
, int offset
, char *to
,int size
);
580 extern int skb_copy_datagram_iovec(struct sk_buff
*from
, int offset
, struct iovec
*to
,int size
);
581 extern void skb_free_datagram(struct sock
* sk
, struct sk_buff
*skb
);
583 extern void skb_init(void);
584 extern void skb_add_mtu(int mtu
);
586 #endif /* __KERNEL__ */
587 #endif /* _LINUX_SKBUFF_H */