[NET]: Clean up sk_buff walkers.
net/core/datagram.c (linux-2.6/kvm.git)
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@redhat.com>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@noblock: blocking operation?
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as in IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	The returned skbuff is owned by the caller, who must release it,
 *	usually by calling skb_free_datagram().
 *
 *	* It does not lock the socket; it is free of race conditions.
 *	* This measure should/can significantly improve datagram socket
 *	* latencies at high loads, when copying data to user space takes
 *	* lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change it without having
 *	the standard around, please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
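
/*
 * Example (illustrative only, not part of the original file): a minimal
 * sketch of how a hypothetical protocol's recvmsg() might drive the
 * helpers in this file -- receive one datagram, copy it into the user's
 * iovec, and release it. The function and its name are made up for
 * illustration; error handling is reduced to the essentials.
 */
#if 0
static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	/* Block (or not, per MSG_DONTWAIT) until a datagram arrives. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Truncate to the user's buffer; POSIX datagram semantics. */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);
	return err ? err : copied;
}
#endif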

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
}

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 */
void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	if (flags & MSG_PEEK) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
}

EXPORT_SYMBOL(skb_kill_datagram);
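
/*
 * Example (illustrative only): the typical caller of skb_kill_datagram()
 * is a recvmsg() path that peeked a datagram and then found it corrupt,
 * so the bad packet must be removed even though it was only peeked. A
 * hedged sketch loosely modelled on what UDP does; the function name is
 * made up for illustration.
 */
#if 0
static int example_peek_and_verify(struct sock *sk, struct msghdr *msg,
				   size_t len, int flags)
{
	struct sk_buff *skb;
	int err;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
	    __skb_checksum_complete(skb)) {
		/* Bad checksum: take the datagram off the queue even if
		 * it was only peeked, so the next read doesn't see it. */
		skb_kill_datagram(sk, skb, flags);
		return -EAGAIN;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov,
				      min_t(size_t, skb->len, len));
	skb_free_datagram(sk, skb);
	return err;
}
#endif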

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int end = skb_headlen(skb);
	int i, copy = end - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		BUG_TRAP(len >= 0);

		end = offset + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset,
					     copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			BUG_TRAP(len >= 0);

			end = offset + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list, 0, to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
		}
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
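
/*
 * Example (illustrative only): the offset argument lets a protocol skip
 * its own header while copying the payload. A sketch of a UDP-style
 * caller (assuming <linux/udp.h> for struct udphdr, and that the
 * transport header sits at the front of the linear data); the function
 * name is made up for illustration.
 */
#if 0
static int example_copy_udp_payload(struct sk_buff *skb, struct msghdr *msg,
				    int copied)
{
	/* Skip the 8-byte UDP header at the front of the datagram. */
	return skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
				       msg->msg_iov, copied);
}
#endif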

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int end = skb_headlen(skb);
	int pos = 0;
	int i, copy = end - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		BUG_TRAP(len >= 0);

		end = offset + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			BUG_TRAP(len >= 0);

			end = offset + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list, 0,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
		}
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
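
/*
 * Example (illustrative only): callers normally reach these through the
 * skb_checksum_complete() wrapper in <linux/skbuff.h>, which skips the
 * software checksum entirely when hardware has already vouched for the
 * packet. The usual receive-path pattern looks roughly like this
 * fragment (csum_error label assumed in the caller):
 */
#if 0
	if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
	    __skb_checksum_complete(skb))
		goto csum_error;	/* checksum failed; drop the packet */
#endif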

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit into this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
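
/*
 * Example (illustrative only): how a UDP-style recvmsg() chooses between
 * the plain and the copy-and-checksum paths. A full copy can verify the
 * checksum on the fly; a truncated copy must verify the whole datagram
 * first. A hedged sketch, loosely modelled on udp_recvmsg(); labels and
 * variables (copied, msg, csum_copy_err) are assumed from the caller.
 */
#if 0
	if (copied < skb->len - sizeof(struct udphdr)) {
		/* Truncated read: verify first, then plain copy. */
		if (skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	} else {
		/* Full read: checksum while copying. */
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
#endif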

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);
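
/*
 * Example (illustrative only): datagram_poll() is meant to be plugged
 * directly into a protocol's proto_ops. A hypothetical datagram family
 * would wire it up like this (PF_EXAMPLE and example_recvmsg are made-up
 * names; example_recvmsg refers to the sketch shown earlier):
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_EXAMPLE,		/* hypothetical family */
	.owner		= THIS_MODULE,
	.poll		= datagram_poll,	/* generic datagram poll */
	.recvmsg	= example_recvmsg,	/* sketch shown earlier */
	/* ... remaining handlers ... */
};
#endif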