/*
 *      SUCS NET3:
 *
 *      Generic datagram handling routines. These are generic for all
 *      protocols. Possibly a generic IP version on top of these would
 *      make sense. Not tonight however 8-).
 *      This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *      NetROM layer all have identical poll code and mostly
 *      identical recvmsg() code. So we share it here. The poll was
 *      shared before but buried in udp.c so I moved it.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>.
 *                      (datagram_poll() from old udp.c code)
 *
 *      Fixes:
 *              Alan Cox        :       NULL return from skb_peek_copy()
 *                                      understood
 *              Alan Cox        :       Rewrote skb_read_datagram to avoid the
 *                                      skb_peek_copy stuff.
 *              Alan Cox        :       Added support for SOCK_SEQPACKET.
 *                                      IPX can no longer use the SO_TYPE hack
 *                                      but AX.25 now works right, and SPX is
 *                                      feasible.
 *              Alan Cox        :       Fixed write poll of non IP protocol
 *                                      crash.
 *              Florian La Roche:       Changed for my new skbuff handling.
 *              Darryl Miles    :       Fixed non-blocking SOCK_SEQPACKET.
 *              Linus Torvalds  :       BSD semantic fixes.
 *              Alan Cox        :       Datagram iovec handling
 *              Darryl Miles    :       Fixed non-blocking SOCK_STREAM.
 *              Alan Cox        :       POSIXisms
 *              Pete Wyckoff    :       Unconnected accept() fix.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 *      Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (!skb_queue_empty(&sk->sk_receive_queue))
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk->sk_sleep, &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}

/**
 *      __skb_recv_datagram - Receive a datagram skbuff
 *      @sk: socket
 *      @flags: MSG_ flags
 *      @peeked: returns non-zero if this packet has been seen before
 *      @err: error code returned
 *
 *      Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *      and possible races. This replaces identical code in packet, raw and
 *      udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *      the long standing peek and read race for datagram sockets. If you
 *      alter this routine remember it must be re-entrant.
 *
 *      This function does not lock the socket; the caller only needs to
 *      free the returned skb when done with it (usually by calling
 *      skb_free_datagram).
 *
 *      * It does not lock socket since today. This function is
 *      * free of race conditions. This measure should/can improve
 *      * significantly datagram socket latencies at high loads,
 *      * when data copying to user space takes lots of time.
 *      * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *      * 8) Great win.)
 *      *                                       --ANK (980729)
 *
 *      The order of the tests when we find no data waiting are specified
 *      quite explicitly by POSIX 1003.1g, don't change them without having
 *      the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *err)
{
        struct sk_buff *skb;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;

                spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb) {
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
                                skb->peeked = 1;
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, &sk->sk_receive_queue);
                }
                spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

                if (skb)
                        return skb;

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (!wait_for_packet(sk, err, &timeo));

        return NULL;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                  int noblock, int *err)
{
        int peeked;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   &peeked, err);
}
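
/*
 * Example (an illustrative sketch, not code from this file): the usual
 * shape of a datagram protocol's recvmsg() built on the helpers here.
 * "msg", "len", "noblock" and "flags" are assumed to come from the caller.
 *
 *      struct sk_buff *skb;
 *      int err, copied;
 *
 *      skb = skb_recv_datagram(sk, flags, noblock, &err);
 *      if (!skb)
 *              return err;
 *      copied = min_t(unsigned int, skb->len, len);
 *      err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *      skb_free_datagram(sk, skb);
 *      return err ? err : copied;
 */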

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
        lock_sock(sk);
        skb_free_datagram(sk, skb);
        release_sock(sk);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *      skb_kill_datagram - Free a datagram skbuff forcibly
 *      @sk: socket
 *      @skb: datagram skbuff
 *      @flags: MSG_ flags
 *
 *      This function frees a datagram skbuff that was received by
 *      skb_recv_datagram.  The flags argument must match the one
 *      used for skb_recv_datagram.
 *
 *      If the MSG_PEEK flag is set, and the packet is still on the
 *      receive queue of the socket, it will be taken off the queue
 *      before it is freed.
 *
 *      This function currently only disables BH when acquiring the
 *      sk_receive_queue lock.  Therefore it must not be used in a
 *      context where that lock is acquired in an IRQ context.
 *
 *      It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        kfree_skb(skb);
        sk_mem_reclaim_partial(sk);

        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
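
/*
 * Example (hedged sketch, not taken from this file): a typical caller is a
 * recvmsg() path that discovers a bad checksum only while copying a peeked
 * datagram, and wants the offending skb off the queue before retrying:
 *
 *      if (skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov)) {
 *              skb_kill_datagram(sk, skb, flags);
 *              return -EAGAIN;
 *      }
 */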

/**
 *      skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying from
 *      @to: io vector to copy to
 *      @len: amount of data to copy from buffer to iovec
 *
 *      Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iovec(frag_iter,
                                                    offset - start,
                                                    to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
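
/*
 * Example (hedged sketch): copying just the payload of a UDP datagram to
 * user space, skipping the 8 byte UDP header that still sits at the front
 * of the skb; "msg" and "copied" are assumed to come from the caller.
 *
 *      err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                    msg->msg_iov, copied);
 */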

/**
 *      skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying from
 *      @to: io vector to copy to
 *      @to_offset: offset in the io vector to start copying to
 *      @len: amount of data to copy from buffer to iovec
 *
 *      Returns 0 or -EFAULT.
 *      Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
                                  const struct iovec *to, int to_offset,
                                  int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovecend(to, vaddr + frag->page_offset +
                                                offset - start, to_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_const_iovec(frag_iter,
                                                          offset - start,
                                                          to, to_offset,
                                                          copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
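
/*
 * Unlike skb_copy_datagram_iovec() above, this variant never advances the
 * iovec itself, so one iovec can be filled piecewise using an explicit
 * destination offset. A hedged sketch (buffer names are illustrative):
 *
 *      skb_copy_datagram_const_iovec(skb_a, 0, iov, 0, len_a);
 *      skb_copy_datagram_const_iovec(skb_b, 0, iov, len_a, len_b);
 */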

/**
 *      skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying to
 *      @from: io vector to copy from
 *      @from_offset: offset in the io vector to start copying from
 *      @len: amount of data to copy to buffer from iovec
 *
 *      Returns 0 or -EFAULT.
 *      Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
                                        copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_fromiovecend(vaddr + frag->page_offset +
                                                  offset - start,
                                                  from, from_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iovec(frag_iter,
                                                         offset - start,
                                                         from,
                                                         from_offset,
                                                         copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
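
/*
 * Copy @len bytes of @skb, starting at @offset, to the user buffer @to
 * while folding the checksum of the copied data into *@csump. The walk
 * mirrors skb_copy_datagram_iovec(): linear header first, then the page
 * frags, then any frag_list skbs. Returns 0 on success or -EFAULT if the
 * copy to user space faults.
 */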
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                int err = 0;
                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
                                                      frag->page_offset +
                                                      offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
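
/*
 * Finish checksumming the first @len bytes of @skb in software, seeding
 * with the partial sum left in skb->csum. A zero fold means the data is
 * good; if ip_summed is still CHECKSUM_COMPLETE at that point the device
 * had disagreed, so its checksumming is reported as broken through
 * netdev_rx_csum_fault(). On success the skb is marked
 * CHECKSUM_UNNECESSARY so the verification is not repeated.
 */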
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *      skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *      @skb: skbuff
 *      @hlen: header length; the first @hlen bytes of @skb are included in
 *             the checksum but not copied to @iov
 *      @iov: io vector
 *
 *      Caller _must_ check that skb will fit to this iovec.
 *
 *      Returns: 0 - success.
 *               -EINVAL - checksum failure.
 *               -EFAULT - fault during copy. Beware, in this case iovec
 *                         can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                     int hlen, struct iovec *iov)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
        while (!iov->iov_len)
                iov++;

        if (iov->iov_len < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                                               chunk, &csum))
                        goto fault;
                if (csum_fold(csum))
                        goto csum_error;
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;
csum_error:
        return -EINVAL;
fault:
        return -EFAULT;
}
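
/*
 * Example (hedged sketch of the usual caller, along the lines of a UDP
 * recvmsg; names outside this file are assumptions): copy without
 * re-checksumming when the checksum is already known to be good,
 * otherwise verify it while copying:
 *
 *      if (skb_csum_unnecessary(skb))
 *              err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                            msg->msg_iov, copied);
 *      else
 *              err = skb_copy_and_csum_datagram_iovec(skb,
 *                                                     sizeof(struct udphdr),
 *                                                     msg->msg_iov);
 */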

/**
 *      datagram_poll - generic datagram poll
 *      @file: file struct
 *      @sock: socket
 *      @wait: poll table
 *
 *      Datagram poll: Again totally generic. This also handles
 *      sequenced packet sockets providing the socket receive queue
 *      is only ever holding data ready to receive.
 *
 *      Note: when you _don't_ use this routine for this protocol,
 *      and you use a different write policy from sock_writeable()
 *      then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk->sk_sleep, wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_recv_datagram);
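
/*
 * Example (illustrative only): datagram_poll is designed to be wired
 * straight into a protocol's proto_ops table; "example_dgram_ops" is a
 * hypothetical name, not something defined in the tree.
 *
 *      static const struct proto_ops example_dgram_ops = {
 *              .family = PF_INET,
 *              .poll   = datagram_poll,
 *              ...
 *      };
 */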