net/can/raw.c

/*
 * raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#define CAN_RAW_VERSION CAN_VERSION
static __initdata const char banner[] =
        KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");

#define MASK_ALL 0

/*
 * A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket, is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
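
/*
 * For illustration only (not part of this module): a minimal userspace
 * sketch of how such a filter list is typically installed, assuming the
 * standard <linux/can.h> and <linux/can/raw.h> headers and a socket s
 * created with socket(PF_CAN, SOCK_RAW, CAN_RAW); error handling is
 * omitted. Passing more than one filter makes raw_setsockopt() below
 * allocate the list dynamically; a single filter is stored in dfilter.
 *
 *      struct can_filter rfilter[2];
 *
 *      rfilter[0].can_id   = 0x123;
 *      rfilter[0].can_mask = CAN_SFF_MASK;
 *      rfilter[1].can_id   = 0x200;
 *      rfilter[1].can_mask = 0x700;
 *
 *      setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
 *                 &rfilter, sizeof(rfilter));
 */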

struct raw_sock {
        struct sock sk;
        int bound;
        int ifindex;
        struct notifier_block notifier;
        int loopback;
        int recv_own_msgs;
        int count;                 /* number of active filters */
        struct can_filter dfilter; /* default/single filter */
        struct can_filter *filter; /* pointer to filter(s) */
        can_err_mask_t err_mask;
};

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
        return (struct raw_sock *)sk;
}

static void raw_rcv(struct sk_buff *skb, void *data)
{
        struct sock *sk = (struct sock *)data;
        struct raw_sock *ro = raw_sk(sk);
        struct sockaddr_can *addr;

        /* check the received tx sock reference */
        if (!ro->recv_own_msgs && skb->sk == sk)
                return;

        /* clone the given skb to be able to enqueue it into the rcv queue */
        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        /*
         *  Put the datagram to the queue so that raw_recvmsg() can
         *  get it from there. We need to pass the interface index to
         *  raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
         *  containing the interface index.
         */

        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
        addr = (struct sockaddr_can *)skb->cb;
        memset(addr, 0, sizeof(*addr));
        addr->can_family = AF_CAN;
        addr->can_ifindex = skb->dev->ifindex;

        if (sock_queue_rcv_skb(sk, skb) < 0)
                kfree_skb(skb);
}

static int raw_enable_filters(struct net_device *dev, struct sock *sk,
                              struct can_filter *filter, int count)
{
        int err = 0;
        int i;

        for (i = 0; i < count; i++) {
                err = can_rx_register(dev, filter[i].can_id,
                                      filter[i].can_mask,
                                      raw_rcv, sk, "raw");
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
                                can_rx_unregister(dev, filter[i].can_id,
                                                  filter[i].can_mask,
                                                  raw_rcv, sk);
                        break;
                }
        }

        return err;
}

static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
                                can_err_mask_t err_mask)
{
        int err = 0;

        if (err_mask)
                err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
                                      raw_rcv, sk, "raw");

        return err;
}
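
/*
 * For illustration only: a userspace sketch of subscribing to error
 * frames, which is what ends up in raw_enable_errfilter() above via the
 * CAN_RAW_ERR_FILTER socket option handled in raw_setsockopt() below.
 * The chosen error classes are just examples from <linux/can/error.h>;
 * error handling is omitted.
 *
 *      can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *
 *      setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *                 &err_mask, sizeof(err_mask));
 */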

static void raw_disable_filters(struct net_device *dev, struct sock *sk,
                                struct can_filter *filter, int count)
{
        int i;

        for (i = 0; i < count; i++)
                can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
                                  raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net_device *dev,
                                         struct sock *sk,
                                         can_err_mask_t err_mask)
{
        if (err_mask)
                can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
                                  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net_device *dev,
                                          struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);

        raw_disable_filters(dev, sk, ro->filter, ro->count);
        raw_disable_errfilter(dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);
        int err;

        err = raw_enable_filters(dev, sk, ro->filter, ro->count);
        if (!err) {
                err = raw_enable_errfilter(dev, sk, ro->err_mask);
                if (err)
                        raw_disable_filters(dev, sk, ro->filter, ro->count);
        }

        return err;
}

static int raw_notifier(struct notifier_block *nb,
                        unsigned long msg, void *data)
{
        struct net_device *dev = (struct net_device *)data;
        struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
        struct sock *sk = &ro->sk;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        if (ro->ifindex != dev->ifindex)
                return NOTIFY_DONE;

        switch (msg) {

        case NETDEV_UNREGISTER:
                lock_sock(sk);
                /* remove current filters & unregister */
                if (ro->bound)
                        raw_disable_allfilters(dev, sk);

                if (ro->count > 1)
                        kfree(ro->filter);

                ro->ifindex = 0;
                ro->bound = 0;
                ro->count = 0;
                release_sock(sk);

                sk->sk_err = ENODEV;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk->sk_error_report(sk);
                break;

        case NETDEV_DOWN:
                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk->sk_error_report(sk);
                break;
        }

        return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
        struct raw_sock *ro = raw_sk(sk);

        ro->bound = 0;
        ro->ifindex = 0;

        /* set default filter to single entry dfilter */
        ro->dfilter.can_id = 0;
        ro->dfilter.can_mask = MASK_ALL;
        ro->filter = &ro->dfilter;
        ro->count = 1;

        /* set default loopback behaviour */
        ro->loopback = 1;
        ro->recv_own_msgs = 0;

        /* set notifier */
        ro->notifier.notifier_call = raw_notifier;

        register_netdevice_notifier(&ro->notifier);

        return 0;
}

static int raw_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);

        unregister_netdevice_notifier(&ro->notifier);

        lock_sock(sk);

        /* remove current filters & unregister */
        if (ro->bound) {
                if (ro->ifindex) {
                        struct net_device *dev;

                        dev = dev_get_by_index(&init_net, ro->ifindex);
                        if (dev) {
                                raw_disable_allfilters(dev, sk);
                                dev_put(dev);
                        }
                } else
                        raw_disable_allfilters(NULL, sk);
        }

        if (ro->count > 1)
                kfree(ro->filter);

        ro->ifindex = 0;
        ro->bound = 0;
        ro->count = 0;

        release_sock(sk);
        sock_put(sk);

        return 0;
}

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
        struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        int ifindex;
        int err = 0;
        int notify_enetdown = 0;

        if (len < sizeof(*addr))
                return -EINVAL;

        lock_sock(sk);

        if (ro->bound && addr->can_ifindex == ro->ifindex)
                goto out;

        if (addr->can_ifindex) {
                struct net_device *dev;

                dev = dev_get_by_index(&init_net, addr->can_ifindex);
                if (!dev) {
                        err = -ENODEV;
                        goto out;
                }
                if (dev->type != ARPHRD_CAN) {
                        dev_put(dev);
                        err = -ENODEV;
                        goto out;
                }
                if (!(dev->flags & IFF_UP))
                        notify_enetdown = 1;

                ifindex = dev->ifindex;

                /* filters set by default/setsockopt */
                err = raw_enable_allfilters(dev, sk);
                dev_put(dev);
        } else {
                ifindex = 0;

                /* filters set by default/setsockopt */
                err = raw_enable_allfilters(NULL, sk);
        }

        if (!err) {
                if (ro->bound) {
                        /* unregister old filters */
                        if (ro->ifindex) {
                                struct net_device *dev;

                                dev = dev_get_by_index(&init_net, ro->ifindex);
                                if (dev) {
                                        raw_disable_allfilters(dev, sk);
                                        dev_put(dev);
                                }
                        } else
                                raw_disable_allfilters(NULL, sk);
                }
                ro->ifindex = ifindex;
                ro->bound = 1;
        }

 out:
        release_sock(sk);

        if (notify_enetdown) {
                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk->sk_error_report(sk);
        }

        return err;
}
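
/*
 * For illustration only: a userspace sketch of the bind() call handled
 * above, assuming if_nametoindex() from <net/if.h> and an interface
 * named "can0" (a placeholder name). A can_ifindex of 0 binds the
 * socket to all CAN interfaces; error handling is omitted.
 *
 *      struct sockaddr_can addr = { 0 };
 *
 *      addr.can_family  = AF_CAN;
 *      addr.can_ifindex = if_nametoindex("can0");
 *
 *      bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */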

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
                       int *len, int peer)
{
        struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);

        if (peer)
                return -EOPNOTSUPP;

        addr->can_family = AF_CAN;
        addr->can_ifindex = ro->ifindex;

        *len = sizeof(*addr);

        return 0;
}

static int raw_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int optlen)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
        struct can_filter sfilter;        /* single filter */
        struct net_device *dev = NULL;
        can_err_mask_t err_mask = 0;
        int count = 0;
        int err = 0;

        if (level != SOL_CAN_RAW)
                return -EINVAL;
        if (optlen < 0)
                return -EINVAL;

        switch (optname) {

        case CAN_RAW_FILTER:
                if (optlen % sizeof(struct can_filter) != 0)
                        return -EINVAL;

                count = optlen / sizeof(struct can_filter);

                if (count > 1) {
                        /* filter does not fit into dfilter => alloc space */
                        filter = kmalloc(optlen, GFP_KERNEL);
                        if (!filter)
                                return -ENOMEM;

                        if (copy_from_user(filter, optval, optlen)) {
                                kfree(filter);
                                return -EFAULT;
                        }
                } else if (count == 1) {
                        if (copy_from_user(&sfilter, optval, optlen))
                                return -EFAULT;
                }

                lock_sock(sk);

                if (ro->bound && ro->ifindex)
                        dev = dev_get_by_index(&init_net, ro->ifindex);

                if (ro->bound) {
                        /* (try to) register the new filters */
                        if (count == 1)
                                err = raw_enable_filters(dev, sk, &sfilter, 1);
                        else
                                err = raw_enable_filters(dev, sk, filter,
                                                         count);
                        if (err) {
                                if (count > 1)
                                        kfree(filter);
                                goto out_fil;
                        }

                        /* remove old filter registrations */
                        raw_disable_filters(dev, sk, ro->filter, ro->count);
                }

                /* remove old filter space */
                if (ro->count > 1)
                        kfree(ro->filter);

                /* link new filters to the socket */
                if (count == 1) {
                        /* copy filter data for single filter */
                        ro->dfilter = sfilter;
                        filter = &ro->dfilter;
                }
                ro->filter = filter;
                ro->count = count;

 out_fil:
                if (dev)
                        dev_put(dev);

                release_sock(sk);

                break;

        case CAN_RAW_ERR_FILTER:
                if (optlen != sizeof(err_mask))
                        return -EINVAL;

                if (copy_from_user(&err_mask, optval, optlen))
                        return -EFAULT;

                err_mask &= CAN_ERR_MASK;

                lock_sock(sk);

                if (ro->bound && ro->ifindex)
                        dev = dev_get_by_index(&init_net, ro->ifindex);

                /* remove current error mask */
                if (ro->bound) {
                        /* (try to) register the new err_mask */
                        err = raw_enable_errfilter(dev, sk, err_mask);

                        if (err)
                                goto out_err;

                        /* remove old err_mask registration */
                        raw_disable_errfilter(dev, sk, ro->err_mask);
                }

                /* link new err_mask to the socket */
                ro->err_mask = err_mask;

 out_err:
                if (dev)
                        dev_put(dev);

                release_sock(sk);

                break;

        case CAN_RAW_LOOPBACK:
                if (optlen != sizeof(ro->loopback))
                        return -EINVAL;

                if (copy_from_user(&ro->loopback, optval, optlen))
                        return -EFAULT;

                break;

        case CAN_RAW_RECV_OWN_MSGS:
                if (optlen != sizeof(ro->recv_own_msgs))
                        return -EINVAL;

                if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
                        return -EFAULT;

                break;

        default:
                return -ENOPROTOOPT;
        }
        return err;
}
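
/*
 * For illustration only: the two int-sized options handled above can be
 * toggled from userspace as sketched here. CAN_RAW_LOOPBACK (default 1)
 * controls whether sent frames are looped back to other local sockets;
 * CAN_RAW_RECV_OWN_MSGS (default 0) controls whether this socket also
 * receives its own transmitted frames. Error handling is omitted.
 *
 *      int loopback = 1;
 *      int recv_own_msgs = 1;
 *
 *      setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
 *                 &loopback, sizeof(loopback));
 *      setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
 *                 &recv_own_msgs, sizeof(recv_own_msgs));
 */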

static int raw_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        int len;
        void *val;
        int err = 0;

        if (level != SOL_CAN_RAW)
                return -EINVAL;
        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {

        case CAN_RAW_FILTER:
                lock_sock(sk);
                if (ro->count > 0) {
                        int fsize = ro->count * sizeof(struct can_filter);
                        if (len > fsize)
                                len = fsize;
                        if (copy_to_user(optval, ro->filter, len))
                                err = -EFAULT;
                } else
                        len = 0;
                release_sock(sk);

                if (!err)
                        err = put_user(len, optlen);
                return err;

        case CAN_RAW_ERR_FILTER:
                if (len > sizeof(can_err_mask_t))
                        len = sizeof(can_err_mask_t);
                val = &ro->err_mask;
                break;

        case CAN_RAW_LOOPBACK:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->loopback;
                break;

        case CAN_RAW_RECV_OWN_MSGS:
                if (len > sizeof(int))
                        len = sizeof(int);
                val = &ro->recv_own_msgs;
                break;

        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, val, len))
                return -EFAULT;
        return 0;
}

static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
        struct sk_buff *skb;
        struct net_device *dev;
        int ifindex;
        int err;

        if (msg->msg_name) {
                struct sockaddr_can *addr =
                        (struct sockaddr_can *)msg->msg_name;

                if (addr->can_family != AF_CAN)
                        return -EINVAL;

                ifindex = addr->can_ifindex;
        } else
                ifindex = ro->ifindex;

        if (size != sizeof(struct can_frame))
                return -EINVAL;

        dev = dev_get_by_index(&init_net, ifindex);
        if (!dev)
                return -ENXIO;

        skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
                                  &err);
        if (!skb)
                goto put_dev;

        err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
        if (err < 0)
                goto free_skb;
        err = sock_tx_timestamp(msg, sk, skb_tx(skb));
        if (err < 0)
                goto free_skb;
        skb->dev = dev;
        skb->sk = sk;

        err = can_send(skb, ro->loopback);

        dev_put(dev);

        if (err)
                goto send_failed;

        return size;

free_skb:
        kfree_skb(skb);
put_dev:
        dev_put(dev);
send_failed:
        return err;
}
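
/*
 * For illustration only: the send path above accepts exactly one
 * struct can_frame per message, so a userspace write on a bound socket
 * looks like this sketch (error handling omitted):
 *
 *      struct can_frame frame = { 0 };
 *
 *      frame.can_id  = 0x123;
 *      frame.can_dlc = 2;
 *      frame.data[0] = 0x11;
 *      frame.data[1] = 0x22;
 *
 *      write(s, &frame, sizeof(frame));
 */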

static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int err = 0;
        int noblock;

        noblock = flags & MSG_DONTWAIT;
        flags &= ~MSG_DONTWAIT;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        if (size < skb->len)
                msg->msg_flags |= MSG_TRUNC;
        else
                size = skb->len;

        err = memcpy_toiovec(msg->msg_iov, skb->data, size);
        if (err < 0) {
                skb_free_datagram(sk, skb);
                return err;
        }

        sock_recv_timestamp(msg, sk, skb);

        if (msg->msg_name) {
                msg->msg_namelen = sizeof(struct sockaddr_can);
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }

        skb_free_datagram(sk, skb);

        return size;
}
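
/*
 * For illustration only: a userspace sketch of the receive path above.
 * The source interface index stored in skb->cb by raw_rcv() is returned
 * through msg_name, so recvfrom() can report which interface a frame
 * arrived on via from.can_ifindex (error handling omitted):
 *
 *      struct can_frame frame;
 *      struct sockaddr_can from;
 *      socklen_t fromlen = sizeof(from);
 *
 *      recvfrom(s, &frame, sizeof(frame), 0,
 *               (struct sockaddr *)&from, &fromlen);
 */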

static struct proto_ops raw_ops __read_mostly = {
        .family        = PF_CAN,
        .release       = raw_release,
        .bind          = raw_bind,
        .connect       = sock_no_connect,
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
        .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
        .setsockopt    = raw_setsockopt,
        .getsockopt    = raw_getsockopt,
        .sendmsg       = raw_sendmsg,
        .recvmsg       = raw_recvmsg,
        .mmap          = sock_no_mmap,
        .sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
        .name       = "CAN_RAW",
        .owner      = THIS_MODULE,
        .obj_size   = sizeof(struct raw_sock),
        .init       = raw_init,
};

static struct can_proto raw_can_proto __read_mostly = {
        .type       = SOCK_RAW,
        .protocol   = CAN_RAW,
        .capability = -1,
        .ops        = &raw_ops,
        .prot       = &raw_proto,
};

static __init int raw_module_init(void)
{
        int err;

        printk(banner);

        err = can_proto_register(&raw_can_proto);
        if (err < 0)
                printk(KERN_ERR "can: registration of raw protocol failed\n");

        return err;
}

static __exit void raw_module_exit(void)
{
        can_proto_unregister(&raw_can_proto);
}

module_init(raw_module_init);
module_exit(raw_module_exit);