net/ipv6/mcast.c
1 /*
2 * Multicast support for IPv6
3 * Linux INET6 implementation
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 /* Changes:
18 * yoshfuji : fix format of router-alert option
19 * YOSHIFUJI Hideaki @USAGI:
20 * Fixed source address for MLD message based on
21 * <draft-ietf-magma-mld-source-05.txt>.
22 * YOSHIFUJI Hideaki @USAGI:
23 * - Ignore Queries for invalid addresses.
24 * - MLD for link-local addresses.
25 * David L Stevens <dlstevens@us.ibm.com>:
26 * - MLDv2 support
29 #include <linux/module.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/jiffies.h>
36 #include <linux/times.h>
37 #include <linux/net.h>
38 #include <linux/in.h>
39 #include <linux/in6.h>
40 #include <linux/netdevice.h>
41 #include <linux/if_arp.h>
42 #include <linux/route.h>
43 #include <linux/init.h>
44 #include <linux/proc_fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/slab.h>
47 #include <linux/pkt_sched.h>
48 #include <net/mld.h>
50 #include <linux/netfilter.h>
51 #include <linux/netfilter_ipv6.h>
53 #include <net/net_namespace.h>
54 #include <net/sock.h>
55 #include <net/snmp.h>
57 #include <net/ipv6.h>
58 #include <net/protocol.h>
59 #include <net/if_inet6.h>
60 #include <net/ndisc.h>
61 #include <net/addrconf.h>
62 #include <net/ip6_route.h>
63 #include <net/inet_common.h>
65 #include <net/ip6_checksum.h>
67 /* Set to 3 to get tracing... */
68 #define MCAST_DEBUG 2
70 #if MCAST_DEBUG >= 3
71 #define MDBG(x) printk x
72 #else
73 #define MDBG(x)
74 #endif
76 /* Ensure that we have struct in6_addr aligned on a 32-bit word. */
77 static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
78 BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
79 BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
80 BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
83 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
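/* mld2_all_mcr is ff02::16, the link-scope all-MLDv2-capable-routers address;
 * MLDv2 reports are addressed to it.
 */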
85 /* Big mc list lock for all the sockets */
86 static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
88 static void igmp6_join_group(struct ifmcaddr6 *ma);
89 static void igmp6_leave_group(struct ifmcaddr6 *ma);
90 static void igmp6_timer_handler(unsigned long data);
92 static void mld_gq_timer_expire(unsigned long data);
93 static void mld_ifc_timer_expire(unsigned long data);
94 static void mld_ifc_event(struct inet6_dev *idev);
95 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
96 static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
97 static void mld_clear_delrec(struct inet6_dev *idev);
98 static bool mld_in_v1_mode(const struct inet6_dev *idev);
99 static int sf_setstate(struct ifmcaddr6 *pmc);
100 static void sf_markstate(struct ifmcaddr6 *pmc);
101 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
102 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
103 int sfmode, int sfcount, const struct in6_addr *psfsrc,
104 int delta);
105 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
106 int sfmode, int sfcount, const struct in6_addr *psfsrc,
107 int delta);
108 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
109 struct inet6_dev *idev);
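/* RFC3810, 9.1. Robustness Variable */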
111 #define MLD_QRV_DEFAULT 2
112 /* RFC3810, 9.2. Query Interval */
113 #define MLD_QI_DEFAULT (125 * HZ)
114 /* RFC3810, 9.3. Query Response Interval */
115 #define MLD_QRI_DEFAULT (10 * HZ)
117 /* RFC3810, 8.1 Query Version Distinctions */
118 #define MLD_V1_QUERY_LEN 24
119 #define MLD_V2_QUERY_LEN_MIN 28
121 #define IPV6_MLD_MAX_MSF 64
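/* Upper bound on per-socket MLD source filters; tunable via the
 * net.ipv6.mld_max_msf sysctl.
 */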
123 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
126 * socket join on multicast group
129 #define for_each_pmc_rcu(np, pmc) \
130 for (pmc = rcu_dereference(np->ipv6_mc_list); \
131 pmc != NULL; \
132 pmc = rcu_dereference(pmc->next))
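/* Unsolicited report interval for the current MLD mode, in jiffies,
 * clamped to at least one jiffy.
 */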
134 static int unsolicited_report_interval(struct inet6_dev *idev)
136 int iv;
138 if (mld_in_v1_mode(idev))
139 iv = idev->cnf.mldv1_unsolicited_report_interval;
140 else
141 iv = idev->cnf.mldv2_unsolicited_report_interval;
143 return iv > 0 ? iv : 1;
146 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
148 struct net_device *dev = NULL;
149 struct ipv6_mc_socklist *mc_lst;
150 struct ipv6_pinfo *np = inet6_sk(sk);
151 struct net *net = sock_net(sk);
152 int err;
154 if (!ipv6_addr_is_multicast(addr))
155 return -EINVAL;
157 rcu_read_lock();
158 for_each_pmc_rcu(np, mc_lst) {
159 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
160 ipv6_addr_equal(&mc_lst->addr, addr)) {
161 rcu_read_unlock();
162 return -EADDRINUSE;
165 rcu_read_unlock();
167 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
169 if (mc_lst == NULL)
170 return -ENOMEM;
172 mc_lst->next = NULL;
173 mc_lst->addr = *addr;
175 rcu_read_lock();
176 if (ifindex == 0) {
177 struct rt6_info *rt;
178 rt = rt6_lookup(net, addr, NULL, 0, 0);
179 if (rt) {
180 dev = rt->dst.dev;
181 ip6_rt_put(rt);
183 } else
184 dev = dev_get_by_index_rcu(net, ifindex);
186 if (dev == NULL) {
187 rcu_read_unlock();
188 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
189 return -ENODEV;
192 mc_lst->ifindex = dev->ifindex;
193 mc_lst->sfmode = MCAST_EXCLUDE;
194 rwlock_init(&mc_lst->sflock);
195 mc_lst->sflist = NULL;
198 * now add/increase the group membership on the device
201 err = ipv6_dev_mc_inc(dev, addr);
203 if (err) {
204 rcu_read_unlock();
205 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
206 return err;
209 spin_lock(&ipv6_sk_mc_lock);
210 mc_lst->next = np->ipv6_mc_list;
211 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
212 spin_unlock(&ipv6_sk_mc_lock);
214 rcu_read_unlock();
216 return 0;
220 * socket leave on multicast group
222 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
224 struct ipv6_pinfo *np = inet6_sk(sk);
225 struct ipv6_mc_socklist *mc_lst;
226 struct ipv6_mc_socklist __rcu **lnk;
227 struct net *net = sock_net(sk);
229 if (!ipv6_addr_is_multicast(addr))
230 return -EINVAL;
232 spin_lock(&ipv6_sk_mc_lock);
233 for (lnk = &np->ipv6_mc_list;
234 (mc_lst = rcu_dereference_protected(*lnk,
235 				     lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
236 lnk = &mc_lst->next) {
237 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
238 ipv6_addr_equal(&mc_lst->addr, addr)) {
239 struct net_device *dev;
241 *lnk = mc_lst->next;
242 spin_unlock(&ipv6_sk_mc_lock);
244 rcu_read_lock();
245 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
246 if (dev != NULL) {
247 struct inet6_dev *idev = __in6_dev_get(dev);
249 (void) ip6_mc_leave_src(sk, mc_lst, idev);
250 if (idev)
251 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
252 } else
253 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
254 rcu_read_unlock();
255 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
256 kfree_rcu(mc_lst, rcu);
257 return 0;
260 spin_unlock(&ipv6_sk_mc_lock);
262 return -EADDRNOTAVAIL;
265 /* called with rcu_read_lock() */
266 static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
267 const struct in6_addr *group,
268 int ifindex)
270 struct net_device *dev = NULL;
271 struct inet6_dev *idev = NULL;
273 if (ifindex == 0) {
274 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
276 if (rt) {
277 dev = rt->dst.dev;
278 ip6_rt_put(rt);
280 } else
281 dev = dev_get_by_index_rcu(net, ifindex);
283 if (!dev)
284 return NULL;
285 idev = __in6_dev_get(dev);
286 if (!idev)
287 return NULL;
288 read_lock_bh(&idev->lock);
289 if (idev->dead) {
290 read_unlock_bh(&idev->lock);
291 return NULL;
293 return idev;
296 void ipv6_sock_mc_close(struct sock *sk)
298 struct ipv6_pinfo *np = inet6_sk(sk);
299 struct ipv6_mc_socklist *mc_lst;
300 struct net *net = sock_net(sk);
302 if (!rcu_access_pointer(np->ipv6_mc_list))
303 return;
305 spin_lock(&ipv6_sk_mc_lock);
306 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
307 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
308 struct net_device *dev;
310 np->ipv6_mc_list = mc_lst->next;
311 spin_unlock(&ipv6_sk_mc_lock);
313 rcu_read_lock();
314 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
315 if (dev) {
316 struct inet6_dev *idev = __in6_dev_get(dev);
318 (void) ip6_mc_leave_src(sk, mc_lst, idev);
319 if (idev)
320 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
321 } else
322 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
323 rcu_read_unlock();
325 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
326 kfree_rcu(mc_lst, rcu);
328 spin_lock(&ipv6_sk_mc_lock);
330 spin_unlock(&ipv6_sk_mc_lock);
333 int ip6_mc_source(int add, int omode, struct sock *sk,
334 struct group_source_req *pgsr)
336 struct in6_addr *source, *group;
337 struct ipv6_mc_socklist *pmc;
338 struct inet6_dev *idev;
339 struct ipv6_pinfo *inet6 = inet6_sk(sk);
340 struct ip6_sf_socklist *psl;
341 struct net *net = sock_net(sk);
342 int i, j, rv;
343 int leavegroup = 0;
344 int pmclocked = 0;
345 int err;
347 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
348 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
350 if (!ipv6_addr_is_multicast(group))
351 return -EINVAL;
353 rcu_read_lock();
354 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
355 if (!idev) {
356 rcu_read_unlock();
357 return -ENODEV;
360 err = -EADDRNOTAVAIL;
362 for_each_pmc_rcu(inet6, pmc) {
363 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
364 continue;
365 if (ipv6_addr_equal(&pmc->addr, group))
366 break;
368 if (!pmc) { /* must have a prior join */
369 err = -EINVAL;
370 goto done;
372 /* if a source filter was set, must be the same mode as before */
373 if (pmc->sflist) {
374 if (pmc->sfmode != omode) {
375 err = -EINVAL;
376 goto done;
378 } else if (pmc->sfmode != omode) {
379 /* allow mode switches for empty-set filters */
380 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
381 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
382 pmc->sfmode = omode;
385 write_lock(&pmc->sflock);
386 pmclocked = 1;
388 psl = pmc->sflist;
389 if (!add) {
390 if (!psl)
391 goto done; /* err = -EADDRNOTAVAIL */
392 rv = !0;
393 for (i=0; i<psl->sl_count; i++) {
394 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
395 if (rv == 0)
396 break;
398 if (rv) /* source not found */
399 goto done; /* err = -EADDRNOTAVAIL */
401 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
402 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
403 leavegroup = 1;
404 goto done;
407 /* update the interface filter */
408 ip6_mc_del_src(idev, group, omode, 1, source, 1);
410 for (j=i+1; j<psl->sl_count; j++)
411 psl->sl_addr[j-1] = psl->sl_addr[j];
412 psl->sl_count--;
413 err = 0;
414 goto done;
416 /* else, add a new source to the filter */
418 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
419 err = -ENOBUFS;
420 goto done;
422 if (!psl || psl->sl_count == psl->sl_max) {
423 struct ip6_sf_socklist *newpsl;
424 int count = IP6_SFBLOCK;
426 if (psl)
427 count += psl->sl_max;
428 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
429 if (!newpsl) {
430 err = -ENOBUFS;
431 goto done;
433 newpsl->sl_max = count;
434 newpsl->sl_count = count - IP6_SFBLOCK;
435 if (psl) {
436 for (i=0; i<psl->sl_count; i++)
437 newpsl->sl_addr[i] = psl->sl_addr[i];
438 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
440 pmc->sflist = psl = newpsl;
442 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
443 for (i=0; i<psl->sl_count; i++) {
444 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
445 		if (rv == 0) /* source already present in the filter list; bail out with the pending error */
446 goto done;
448 for (j=psl->sl_count-1; j>=i; j--)
449 psl->sl_addr[j+1] = psl->sl_addr[j];
450 psl->sl_addr[i] = *source;
451 psl->sl_count++;
452 err = 0;
453 /* update the interface list */
454 ip6_mc_add_src(idev, group, omode, 1, source, 1);
455 done:
456 if (pmclocked)
457 write_unlock(&pmc->sflock);
458 read_unlock_bh(&idev->lock);
459 rcu_read_unlock();
460 if (leavegroup)
461 return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
462 return err;
465 int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
467 const struct in6_addr *group;
468 struct ipv6_mc_socklist *pmc;
469 struct inet6_dev *idev;
470 struct ipv6_pinfo *inet6 = inet6_sk(sk);
471 struct ip6_sf_socklist *newpsl, *psl;
472 struct net *net = sock_net(sk);
473 int leavegroup = 0;
474 int i, err;
476 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
478 if (!ipv6_addr_is_multicast(group))
479 return -EINVAL;
480 if (gsf->gf_fmode != MCAST_INCLUDE &&
481 gsf->gf_fmode != MCAST_EXCLUDE)
482 return -EINVAL;
484 rcu_read_lock();
485 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
487 if (!idev) {
488 rcu_read_unlock();
489 return -ENODEV;
492 err = 0;
494 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
495 leavegroup = 1;
496 goto done;
499 for_each_pmc_rcu(inet6, pmc) {
500 if (pmc->ifindex != gsf->gf_interface)
501 continue;
502 if (ipv6_addr_equal(&pmc->addr, group))
503 break;
505 if (!pmc) { /* must have a prior join */
506 err = -EINVAL;
507 goto done;
509 if (gsf->gf_numsrc) {
510 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
511 GFP_ATOMIC);
512 if (!newpsl) {
513 err = -ENOBUFS;
514 goto done;
516 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
517 for (i=0; i<newpsl->sl_count; ++i) {
518 struct sockaddr_in6 *psin6;
520 psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
521 newpsl->sl_addr[i] = psin6->sin6_addr;
523 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
524 newpsl->sl_count, newpsl->sl_addr, 0);
525 if (err) {
526 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
527 goto done;
529 } else {
530 newpsl = NULL;
531 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
534 write_lock(&pmc->sflock);
535 psl = pmc->sflist;
536 if (psl) {
537 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
538 psl->sl_count, psl->sl_addr, 0);
539 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
540 } else
541 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
542 pmc->sflist = newpsl;
543 pmc->sfmode = gsf->gf_fmode;
544 write_unlock(&pmc->sflock);
545 err = 0;
546 done:
547 read_unlock_bh(&idev->lock);
548 rcu_read_unlock();
549 if (leavegroup)
550 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
551 return err;
554 int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
555 struct group_filter __user *optval, int __user *optlen)
557 int err, i, count, copycount;
558 const struct in6_addr *group;
559 struct ipv6_mc_socklist *pmc;
560 struct inet6_dev *idev;
561 struct ipv6_pinfo *inet6 = inet6_sk(sk);
562 struct ip6_sf_socklist *psl;
563 struct net *net = sock_net(sk);
565 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
567 if (!ipv6_addr_is_multicast(group))
568 return -EINVAL;
570 rcu_read_lock();
571 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
573 if (!idev) {
574 rcu_read_unlock();
575 return -ENODEV;
578 err = -EADDRNOTAVAIL;
580 * changes to the ipv6_mc_list require the socket lock and
581 	 * a read lock on ipv6_sk_mc_lock. We have the socket lock,
582 * so reading the list is safe.
585 for_each_pmc_rcu(inet6, pmc) {
586 if (pmc->ifindex != gsf->gf_interface)
587 continue;
588 if (ipv6_addr_equal(group, &pmc->addr))
589 break;
591 if (!pmc) /* must have a prior join */
592 goto done;
593 gsf->gf_fmode = pmc->sfmode;
594 psl = pmc->sflist;
595 count = psl ? psl->sl_count : 0;
596 read_unlock_bh(&idev->lock);
597 rcu_read_unlock();
599 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
600 gsf->gf_numsrc = count;
601 if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
602 copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
603 return -EFAULT;
605 /* changes to psl require the socket lock, a read lock on
606 	 * ipv6_sk_mc_lock and a write lock on pmc->sflock. We
607 * have the socket lock, so reading here is safe.
609 for (i=0; i<copycount; i++) {
610 struct sockaddr_in6 *psin6;
611 struct sockaddr_storage ss;
613 psin6 = (struct sockaddr_in6 *)&ss;
614 memset(&ss, 0, sizeof(ss));
615 psin6->sin6_family = AF_INET6;
616 psin6->sin6_addr = psl->sl_addr[i];
617 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
618 return -EFAULT;
620 return 0;
621 done:
622 read_unlock_bh(&idev->lock);
623 rcu_read_unlock();
624 return err;
627 bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
628 const struct in6_addr *src_addr)
630 struct ipv6_pinfo *np = inet6_sk(sk);
631 struct ipv6_mc_socklist *mc;
632 struct ip6_sf_socklist *psl;
633 bool rv = true;
635 rcu_read_lock();
636 for_each_pmc_rcu(np, mc) {
637 if (ipv6_addr_equal(&mc->addr, mc_addr))
638 break;
640 if (!mc) {
641 rcu_read_unlock();
642 return true;
644 read_lock(&mc->sflock);
645 psl = mc->sflist;
646 if (!psl) {
647 rv = mc->sfmode == MCAST_EXCLUDE;
648 } else {
649 int i;
651 for (i=0; i<psl->sl_count; i++) {
652 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
653 break;
655 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
656 rv = false;
657 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
658 rv = false;
660 read_unlock(&mc->sflock);
661 rcu_read_unlock();
663 return rv;
666 static void ma_put(struct ifmcaddr6 *mc)
668 if (atomic_dec_and_test(&mc->mca_refcnt)) {
669 in6_dev_put(mc->idev);
670 kfree(mc);
674 static void igmp6_group_added(struct ifmcaddr6 *mc)
676 struct net_device *dev = mc->idev->dev;
677 char buf[MAX_ADDR_LEN];
679 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
680 IPV6_ADDR_SCOPE_LINKLOCAL)
681 return;
683 spin_lock_bh(&mc->mca_lock);
684 if (!(mc->mca_flags&MAF_LOADED)) {
685 mc->mca_flags |= MAF_LOADED;
686 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
687 dev_mc_add(dev, buf);
689 spin_unlock_bh(&mc->mca_lock);
691 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
692 return;
694 if (mld_in_v1_mode(mc->idev)) {
695 igmp6_join_group(mc);
696 return;
698 /* else v2 */
700 mc->mca_crcount = mc->idev->mc_qrv;
701 mld_ifc_event(mc->idev);
704 static void igmp6_group_dropped(struct ifmcaddr6 *mc)
706 struct net_device *dev = mc->idev->dev;
707 char buf[MAX_ADDR_LEN];
709 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
710 IPV6_ADDR_SCOPE_LINKLOCAL)
711 return;
713 spin_lock_bh(&mc->mca_lock);
714 if (mc->mca_flags&MAF_LOADED) {
715 mc->mca_flags &= ~MAF_LOADED;
716 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
717 dev_mc_del(dev, buf);
720 if (mc->mca_flags & MAF_NOREPORT)
721 goto done;
722 spin_unlock_bh(&mc->mca_lock);
724 if (!mc->idev->dead)
725 igmp6_leave_group(mc);
727 spin_lock_bh(&mc->mca_lock);
728 if (del_timer(&mc->mca_timer))
729 atomic_dec(&mc->mca_refcnt);
730 done:
731 ip6_mc_clear_src(mc);
732 spin_unlock_bh(&mc->mca_lock);
736 * deleted ifmcaddr6 manipulation
738 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
740 struct ifmcaddr6 *pmc;
742 /* this is an "ifmcaddr6" for convenience; only the fields below
743 * are actually used. In particular, the refcnt and users are not
744 * used for management of the delete list. Using the same structure
745 * for deleted items allows change reports to use common code with
746 * non-deleted or query-response MCA's.
748 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
749 if (!pmc)
750 return;
752 spin_lock_bh(&im->mca_lock);
753 spin_lock_init(&pmc->mca_lock);
754 pmc->idev = im->idev;
755 in6_dev_hold(idev);
756 pmc->mca_addr = im->mca_addr;
757 pmc->mca_crcount = idev->mc_qrv;
758 pmc->mca_sfmode = im->mca_sfmode;
759 if (pmc->mca_sfmode == MCAST_INCLUDE) {
760 struct ip6_sf_list *psf;
762 pmc->mca_tomb = im->mca_tomb;
763 pmc->mca_sources = im->mca_sources;
764 im->mca_tomb = im->mca_sources = NULL;
765 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
766 psf->sf_crcount = pmc->mca_crcount;
768 spin_unlock_bh(&im->mca_lock);
770 spin_lock_bh(&idev->mc_lock);
771 pmc->next = idev->mc_tomb;
772 idev->mc_tomb = pmc;
773 spin_unlock_bh(&idev->mc_lock);
776 static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
778 struct ifmcaddr6 *pmc, *pmc_prev;
779 struct ip6_sf_list *psf, *psf_next;
781 spin_lock_bh(&idev->mc_lock);
782 pmc_prev = NULL;
783 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
784 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
785 break;
786 pmc_prev = pmc;
788 if (pmc) {
789 if (pmc_prev)
790 pmc_prev->next = pmc->next;
791 else
792 idev->mc_tomb = pmc->next;
794 spin_unlock_bh(&idev->mc_lock);
796 if (pmc) {
797 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
798 psf_next = psf->sf_next;
799 kfree(psf);
801 in6_dev_put(pmc->idev);
802 kfree(pmc);
806 static void mld_clear_delrec(struct inet6_dev *idev)
808 struct ifmcaddr6 *pmc, *nextpmc;
810 spin_lock_bh(&idev->mc_lock);
811 pmc = idev->mc_tomb;
812 idev->mc_tomb = NULL;
813 spin_unlock_bh(&idev->mc_lock);
815 for (; pmc; pmc = nextpmc) {
816 nextpmc = pmc->next;
817 ip6_mc_clear_src(pmc);
818 in6_dev_put(pmc->idev);
819 kfree(pmc);
822 /* clear dead sources, too */
823 read_lock_bh(&idev->lock);
824 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
825 struct ip6_sf_list *psf, *psf_next;
827 spin_lock_bh(&pmc->mca_lock);
828 psf = pmc->mca_tomb;
829 pmc->mca_tomb = NULL;
830 spin_unlock_bh(&pmc->mca_lock);
831 for (; psf; psf=psf_next) {
832 psf_next = psf->sf_next;
833 kfree(psf);
836 read_unlock_bh(&idev->lock);
841 * device multicast group inc (add if not found)
843 int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
845 struct ifmcaddr6 *mc;
846 struct inet6_dev *idev;
848 /* we need to take a reference on idev */
849 idev = in6_dev_get(dev);
851 if (idev == NULL)
852 return -EINVAL;
854 write_lock_bh(&idev->lock);
855 if (idev->dead) {
856 write_unlock_bh(&idev->lock);
857 in6_dev_put(idev);
858 return -ENODEV;
861 for (mc = idev->mc_list; mc; mc = mc->next) {
862 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
863 mc->mca_users++;
864 write_unlock_bh(&idev->lock);
865 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
866 NULL, 0);
867 in6_dev_put(idev);
868 return 0;
873 * not found: create a new one.
876 mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
878 if (mc == NULL) {
879 write_unlock_bh(&idev->lock);
880 in6_dev_put(idev);
881 return -ENOMEM;
884 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
886 mc->mca_addr = *addr;
887 mc->idev = idev; /* (reference taken) */
888 mc->mca_users = 1;
889 /* mca_stamp should be updated upon changes */
890 mc->mca_cstamp = mc->mca_tstamp = jiffies;
891 atomic_set(&mc->mca_refcnt, 2);
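	/* one reference for the idev->mc_list linkage, one for the ma_put()
	 * at the end of this function
	 */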
892 spin_lock_init(&mc->mca_lock);
894 /* initial mode is (EX, empty) */
895 mc->mca_sfmode = MCAST_EXCLUDE;
896 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
898 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
899 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
900 mc->mca_flags |= MAF_NOREPORT;
902 mc->next = idev->mc_list;
903 idev->mc_list = mc;
904 write_unlock_bh(&idev->lock);
906 mld_del_delrec(idev, &mc->mca_addr);
907 igmp6_group_added(mc);
908 ma_put(mc);
909 return 0;
913 * device multicast group del
915 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
917 struct ifmcaddr6 *ma, **map;
919 write_lock_bh(&idev->lock);
920 for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
921 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
922 if (--ma->mca_users == 0) {
923 *map = ma->next;
924 write_unlock_bh(&idev->lock);
926 igmp6_group_dropped(ma);
928 ma_put(ma);
929 return 0;
931 write_unlock_bh(&idev->lock);
932 return 0;
935 write_unlock_bh(&idev->lock);
937 return -ENOENT;
940 int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
942 struct inet6_dev *idev;
943 int err;
945 rcu_read_lock();
947 idev = __in6_dev_get(dev);
948 if (!idev)
949 err = -ENODEV;
950 else
951 err = __ipv6_dev_mc_dec(idev, addr);
953 rcu_read_unlock();
954 return err;
958 * check if the interface/address pair is valid
960 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
961 const struct in6_addr *src_addr)
963 struct inet6_dev *idev;
964 struct ifmcaddr6 *mc;
965 bool rv = false;
967 rcu_read_lock();
968 idev = __in6_dev_get(dev);
969 if (idev) {
970 read_lock_bh(&idev->lock);
971 for (mc = idev->mc_list; mc; mc=mc->next) {
972 if (ipv6_addr_equal(&mc->mca_addr, group))
973 break;
975 if (mc) {
976 if (src_addr && !ipv6_addr_any(src_addr)) {
977 struct ip6_sf_list *psf;
979 spin_lock_bh(&mc->mca_lock);
980 for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
981 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
982 break;
984 if (psf)
985 rv = psf->sf_count[MCAST_INCLUDE] ||
986 psf->sf_count[MCAST_EXCLUDE] !=
987 mc->mca_sfcount[MCAST_EXCLUDE];
988 else
989 				rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
990 spin_unlock_bh(&mc->mca_lock);
991 } else
992 rv = true; /* don't filter unspecified source */
994 read_unlock_bh(&idev->lock);
996 rcu_read_unlock();
997 return rv;
1000 static void mld_gq_start_timer(struct inet6_dev *idev)
1002 unsigned long tv = net_random() % idev->mc_maxdelay;
1004 idev->mc_gq_running = 1;
1005 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1006 in6_dev_hold(idev);
1009 static void mld_gq_stop_timer(struct inet6_dev *idev)
1011 idev->mc_gq_running = 0;
1012 if (del_timer(&idev->mc_gq_timer))
1013 __in6_dev_put(idev);
1016 static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1018 unsigned long tv = net_random() % delay;
1020 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1021 in6_dev_hold(idev);
1024 static void mld_ifc_stop_timer(struct inet6_dev *idev)
1026 idev->mc_ifc_count = 0;
1027 if (del_timer(&idev->mc_ifc_timer))
1028 __in6_dev_put(idev);
1031 static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1033 unsigned long tv = net_random() % delay;
1035 if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1036 in6_dev_hold(idev);
1039 static void mld_dad_stop_timer(struct inet6_dev *idev)
1041 if (del_timer(&idev->mc_dad_timer))
1042 __in6_dev_put(idev);
1046 * IGMP handling (alias multicast ICMPv6 messages)
1049 static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1051 unsigned long delay = resptime;
1053 /* Do not start timer for these addresses */
1054 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1055 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1056 return;
1058 if (del_timer(&ma->mca_timer)) {
1059 atomic_dec(&ma->mca_refcnt);
1060 delay = ma->mca_timer.expires - jiffies;
1063 if (delay >= resptime)
1064 delay = net_random() % resptime;
1066 ma->mca_timer.expires = jiffies + delay;
1067 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1068 atomic_inc(&ma->mca_refcnt);
1069 ma->mca_flags |= MAF_TIMER_RUNNING;
1072 /* mark EXCLUDE-mode sources */
1073 static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1074 const struct in6_addr *srcs)
1076 struct ip6_sf_list *psf;
1077 int i, scount;
1079 scount = 0;
1080 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1081 if (scount == nsrcs)
1082 break;
1083 for (i=0; i<nsrcs; i++) {
1084 /* skip inactive filters */
1085 if (psf->sf_count[MCAST_INCLUDE] ||
1086 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1087 psf->sf_count[MCAST_EXCLUDE])
1088 break;
1089 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1090 scount++;
1091 break;
1095 pmc->mca_flags &= ~MAF_GSQUERY;
1096 if (scount == nsrcs) /* all sources excluded */
1097 return false;
1098 return true;
1101 static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1102 const struct in6_addr *srcs)
1104 struct ip6_sf_list *psf;
1105 int i, scount;
1107 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1108 return mld_xmarksources(pmc, nsrcs, srcs);
1110 /* mark INCLUDE-mode sources */
1112 scount = 0;
1113 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1114 if (scount == nsrcs)
1115 break;
1116 for (i=0; i<nsrcs; i++) {
1117 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1118 psf->sf_gsresp = 1;
1119 scount++;
1120 break;
1124 if (!scount) {
1125 pmc->mca_flags &= ~MAF_GSQUERY;
1126 return false;
1128 pmc->mca_flags |= MAF_GSQUERY;
1129 return true;
1132 static int mld_force_mld_version(const struct inet6_dev *idev)
1134 	/* Normally, both are 0 here. If enforcement of a particular
1135 	 * version is in use, the per-device setting has lower precedence
1136 	 * than the 'all' device setting (.../conf/all/force_mld_version).
1139 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1140 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1141 else
1142 return idev->cnf.force_mld_version;
1145 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1147 return mld_force_mld_version(idev) == 2;
1150 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1152 return mld_force_mld_version(idev) == 1;
1155 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1157 if (mld_in_v2_mode_only(idev))
1158 return false;
1159 if (mld_in_v1_mode_only(idev))
1160 return true;
1161 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1162 return true;
1164 return false;
1167 static void mld_set_v1_mode(struct inet6_dev *idev)
1169 /* RFC3810, relevant sections:
1170 * - 9.1. Robustness Variable
1171 * - 9.2. Query Interval
1172 * - 9.3. Query Response Interval
1173 * - 9.12. Older Version Querier Present Timeout
1175 unsigned long switchback;
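	/* [Older Version Querier Present Timeout] = [Robustness Variable] *
	 * [Query Interval] + [Query Response Interval] (RFC3810, 9.12);
	 * mld_in_v1_mode() keeps us in MLDv1 compatibility until mc_v1_seen.
	 */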
1177 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1179 idev->mc_v1_seen = jiffies + switchback;
1182 static void mld_update_qrv(struct inet6_dev *idev,
1183 const struct mld2_query *mlh2)
1185 /* RFC3810, relevant sections:
1186 * - 5.1.8. QRV (Querier's Robustness Variable)
1187 * - 9.1. Robustness Variable
1190 /* The value of the Robustness Variable MUST NOT be zero,
1191 * and SHOULD NOT be one. Catch this here if we ever run
1192 * into such a case in future.
1194 WARN_ON(idev->mc_qrv == 0);
1196 if (mlh2->mld2q_qrv > 0)
1197 idev->mc_qrv = mlh2->mld2q_qrv;
1199 if (unlikely(idev->mc_qrv < 2)) {
1200 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1201 idev->mc_qrv, MLD_QRV_DEFAULT);
1202 idev->mc_qrv = MLD_QRV_DEFAULT;
1206 static void mld_update_qi(struct inet6_dev *idev,
1207 const struct mld2_query *mlh2)
1209 /* RFC3810, relevant sections:
1210 * - 5.1.9. QQIC (Querier's Query Interval Code)
1211 * - 9.2. Query Interval
1212 * - 9.12. Older Version Querier Present Timeout
1213 * (the [Query Interval] in the last Query received)
1215 unsigned long mc_qqi;
1217 if (mlh2->mld2q_qqic < 128) {
1218 mc_qqi = mlh2->mld2q_qqic;
1219 } else {
1220 unsigned long mc_man, mc_exp;
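		/* QQIC >= 128 is a floating point encoding:
		 * QQI = (mantissa | 0x10) << (exponent + 3) seconds
		 * (RFC3810, 5.1.9.)
		 */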
1222 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1223 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1225 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1228 idev->mc_qi = mc_qqi * HZ;
1231 static void mld_update_qri(struct inet6_dev *idev,
1232 const struct mld2_query *mlh2)
1234 /* RFC3810, relevant sections:
1235 * - 5.1.3. Maximum Response Code
1236 * - 9.3. Query Response Interval
1238 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1241 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1242 unsigned long *max_delay)
1244 unsigned long mldv1_md;
1246 /* Ignore v1 queries */
1247 if (mld_in_v2_mode_only(idev))
1248 return -EINVAL;
1250 /* MLDv1 router present */
1251 mldv1_md = ntohs(mld->mld_maxdelay);
1252 *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1254 mld_set_v1_mode(idev);
1256 /* cancel MLDv2 report timer */
1257 mld_gq_stop_timer(idev);
1258 /* cancel the interface change timer */
1259 mld_ifc_stop_timer(idev);
1260 /* clear deleted report items */
1261 mld_clear_delrec(idev);
1263 return 0;
1266 static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1267 unsigned long *max_delay)
1269 /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
1270 if (mld_in_v1_mode(idev))
1271 return -EINVAL;
1273 *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1275 mld_update_qrv(idev, mld);
1276 mld_update_qi(idev, mld);
1277 mld_update_qri(idev, mld);
1279 idev->mc_maxdelay = *max_delay;
1281 return 0;
1284 /* called with rcu_read_lock() */
1285 int igmp6_event_query(struct sk_buff *skb)
1287 struct mld2_query *mlh2 = NULL;
1288 struct ifmcaddr6 *ma;
1289 const struct in6_addr *group;
1290 unsigned long max_delay;
1291 struct inet6_dev *idev;
1292 struct mld_msg *mld;
1293 int group_type;
1294 int mark = 0;
1295 int len, err;
1297 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1298 return -EINVAL;
1300 /* compute payload length excluding extension headers */
1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1302 len -= skb_network_header_len(skb);
1304 	/* Drop queries whose source address is not link-local */
1305 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
1306 return -EINVAL;
1308 idev = __in6_dev_get(skb->dev);
1309 if (idev == NULL)
1310 return 0;
1312 mld = (struct mld_msg *)icmp6_hdr(skb);
1313 group = &mld->mld_mca;
1314 group_type = ipv6_addr_type(group);
1316 if (group_type != IPV6_ADDR_ANY &&
1317 !(group_type&IPV6_ADDR_MULTICAST))
1318 return -EINVAL;
1320 if (len == MLD_V1_QUERY_LEN) {
1321 err = mld_process_v1(idev, mld, &max_delay);
1322 if (err < 0)
1323 return err;
1324 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1325 int srcs_offset = sizeof(struct mld2_query) -
1326 sizeof(struct icmp6hdr);
1328 if (!pskb_may_pull(skb, srcs_offset))
1329 return -EINVAL;
1331 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1333 err = mld_process_v2(idev, mlh2, &max_delay);
1334 if (err < 0)
1335 return err;
1337 if (group_type == IPV6_ADDR_ANY) { /* general query */
1338 if (mlh2->mld2q_nsrcs)
1339 return -EINVAL; /* no sources allowed */
1341 mld_gq_start_timer(idev);
1342 return 0;
1344 /* mark sources to include, if group & source-specific */
1345 if (mlh2->mld2q_nsrcs != 0) {
1346 if (!pskb_may_pull(skb, srcs_offset +
1347 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1348 return -EINVAL;
1350 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1351 mark = 1;
1353 } else
1354 return -EINVAL;
1356 read_lock_bh(&idev->lock);
1357 if (group_type == IPV6_ADDR_ANY) {
1358 for (ma = idev->mc_list; ma; ma=ma->next) {
1359 spin_lock_bh(&ma->mca_lock);
1360 igmp6_group_queried(ma, max_delay);
1361 spin_unlock_bh(&ma->mca_lock);
1363 } else {
1364 for (ma = idev->mc_list; ma; ma=ma->next) {
1365 if (!ipv6_addr_equal(group, &ma->mca_addr))
1366 continue;
1367 spin_lock_bh(&ma->mca_lock);
1368 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1369 /* gsquery <- gsquery && mark */
1370 if (!mark)
1371 ma->mca_flags &= ~MAF_GSQUERY;
1372 } else {
1373 /* gsquery <- mark */
1374 if (mark)
1375 ma->mca_flags |= MAF_GSQUERY;
1376 else
1377 ma->mca_flags &= ~MAF_GSQUERY;
1379 if (!(ma->mca_flags & MAF_GSQUERY) ||
1380 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1381 igmp6_group_queried(ma, max_delay);
1382 spin_unlock_bh(&ma->mca_lock);
1383 break;
1386 read_unlock_bh(&idev->lock);
1388 return 0;
1391 /* called with rcu_read_lock() */
1392 int igmp6_event_report(struct sk_buff *skb)
1394 struct ifmcaddr6 *ma;
1395 struct inet6_dev *idev;
1396 struct mld_msg *mld;
1397 int addr_type;
1399 /* Our own report looped back. Ignore it. */
1400 if (skb->pkt_type == PACKET_LOOPBACK)
1401 return 0;
1403 /* send our report if the MC router may not have heard this report */
1404 if (skb->pkt_type != PACKET_MULTICAST &&
1405 skb->pkt_type != PACKET_BROADCAST)
1406 return 0;
1408 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1409 return -EINVAL;
1411 mld = (struct mld_msg *)icmp6_hdr(skb);
1413 	/* Drop reports whose source is neither link-local nor unspecified */
1414 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1415 if (addr_type != IPV6_ADDR_ANY &&
1416 !(addr_type&IPV6_ADDR_LINKLOCAL))
1417 return -EINVAL;
1419 idev = __in6_dev_get(skb->dev);
1420 if (idev == NULL)
1421 return -ENODEV;
1424 * Cancel the timer for this group
1427 read_lock_bh(&idev->lock);
1428 for (ma = idev->mc_list; ma; ma=ma->next) {
1429 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1430 spin_lock(&ma->mca_lock);
1431 if (del_timer(&ma->mca_timer))
1432 atomic_dec(&ma->mca_refcnt);
1433 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1434 spin_unlock(&ma->mca_lock);
1435 break;
1438 read_unlock_bh(&idev->lock);
1439 return 0;
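/* Decide whether source psf belongs in a group record of the given MLDv2
 * record type; gdeleted/sdeleted indicate that the group/source comes from
 * the "tomb" (deleted) lists.
 */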
1442 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1443 int gdeleted, int sdeleted)
1445 switch (type) {
1446 case MLD2_MODE_IS_INCLUDE:
1447 case MLD2_MODE_IS_EXCLUDE:
1448 if (gdeleted || sdeleted)
1449 return false;
1450 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1451 if (pmc->mca_sfmode == MCAST_INCLUDE)
1452 return true;
1453 /* don't include if this source is excluded
1454 * in all filters
1456 if (psf->sf_count[MCAST_INCLUDE])
1457 return type == MLD2_MODE_IS_INCLUDE;
1458 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1459 psf->sf_count[MCAST_EXCLUDE];
1461 return false;
1462 case MLD2_CHANGE_TO_INCLUDE:
1463 if (gdeleted || sdeleted)
1464 return false;
1465 return psf->sf_count[MCAST_INCLUDE] != 0;
1466 case MLD2_CHANGE_TO_EXCLUDE:
1467 if (gdeleted || sdeleted)
1468 return false;
1469 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1470 psf->sf_count[MCAST_INCLUDE])
1471 return false;
1472 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1473 psf->sf_count[MCAST_EXCLUDE];
1474 case MLD2_ALLOW_NEW_SOURCES:
1475 if (gdeleted || !psf->sf_crcount)
1476 return false;
1477 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1478 case MLD2_BLOCK_OLD_SOURCES:
1479 if (pmc->mca_sfmode == MCAST_INCLUDE)
1480 return gdeleted || (psf->sf_crcount && sdeleted);
1481 return psf->sf_crcount && !gdeleted && !sdeleted;
1483 return false;
1486 static int
1487 mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1489 struct ip6_sf_list *psf;
1490 int scount = 0;
1492 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
1493 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1494 continue;
1495 scount++;
1497 return scount;
1500 static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1501 struct net_device *dev,
1502 const struct in6_addr *saddr,
1503 const struct in6_addr *daddr,
1504 int proto, int len)
1506 struct ipv6hdr *hdr;
1508 skb->protocol = htons(ETH_P_IPV6);
1509 skb->dev = dev;
1511 skb_reset_network_header(skb);
1512 skb_put(skb, sizeof(struct ipv6hdr));
1513 hdr = ipv6_hdr(skb);
1515 ip6_flow_hdr(hdr, 0, 0);
1517 hdr->payload_len = htons(len);
1518 hdr->nexthdr = proto;
1519 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1521 hdr->saddr = *saddr;
1522 hdr->daddr = *daddr;
1525 static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
1527 struct net_device *dev = idev->dev;
1528 struct net *net = dev_net(dev);
1529 struct sock *sk = net->ipv6.igmp_sk;
1530 struct sk_buff *skb;
1531 struct mld2_report *pmr;
1532 struct in6_addr addr_buf;
1533 const struct in6_addr *saddr;
1534 int hlen = LL_RESERVED_SPACE(dev);
1535 int tlen = dev->needed_tailroom;
1536 int err;
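	/* Hop-by-Hop extension header carrying the Router Alert option
	 * (value 0 = MLD) plus a PadN option; MLD messages must carry it.
	 */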
1537 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1538 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1539 IPV6_TLV_PADN, 0 };
1541 /* we assume size > sizeof(ra) here */
1542 size += hlen + tlen;
1543 /* limit our allocations to order-0 page */
1544 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1545 skb = sock_alloc_send_skb(sk, size, 1, &err);
1547 if (!skb)
1548 return NULL;
1550 skb->priority = TC_PRIO_CONTROL;
1551 skb_reserve(skb, hlen);
1553 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1554 /* <draft-ietf-magma-mld-source-05.txt>:
1555 * use unspecified address as the source address
1556 * when a valid link-local address is not available.
1558 saddr = &in6addr_any;
1559 } else
1560 saddr = &addr_buf;
1562 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1564 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1566 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1567 skb_put(skb, sizeof(*pmr));
1568 pmr = (struct mld2_report *)skb_transport_header(skb);
1569 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1570 pmr->mld2r_resv1 = 0;
1571 pmr->mld2r_cksum = 0;
1572 pmr->mld2r_resv2 = 0;
1573 pmr->mld2r_ngrec = 0;
1574 return skb;
1577 static void mld_sendpack(struct sk_buff *skb)
1579 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1580 struct mld2_report *pmr =
1581 (struct mld2_report *)skb_transport_header(skb);
1582 int payload_len, mldlen;
1583 struct inet6_dev *idev;
1584 struct net *net = dev_net(skb->dev);
1585 int err;
1586 struct flowi6 fl6;
1587 struct dst_entry *dst;
1589 rcu_read_lock();
1590 idev = __in6_dev_get(skb->dev);
1591 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1593 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1594 sizeof(*pip6);
1595 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1596 pip6->payload_len = htons(payload_len);
1598 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1599 IPPROTO_ICMPV6,
1600 csum_partial(skb_transport_header(skb),
1601 mldlen, 0));
1603 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1604 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1605 skb->dev->ifindex);
1606 dst = icmp6_dst_alloc(skb->dev, &fl6);
1608 err = 0;
1609 if (IS_ERR(dst)) {
1610 err = PTR_ERR(dst);
1611 dst = NULL;
1613 skb_dst_set(skb, dst);
1614 if (err)
1615 goto err_out;
1617 payload_len = skb->len;
1619 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1620 dst_output);
1621 out:
1622 if (!err) {
1623 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1624 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1625 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1626 } else
1627 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1629 rcu_read_unlock();
1630 return;
1632 err_out:
1633 kfree_skb(skb);
1634 goto out;
1637 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
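	/* one group record header plus 16 bytes (sizeof(struct in6_addr))
	 * per source that would be reported
	 */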
1639 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1642 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1643 int type, struct mld2_grec **ppgr)
1645 struct net_device *dev = pmc->idev->dev;
1646 struct mld2_report *pmr;
1647 struct mld2_grec *pgr;
1649 if (!skb)
1650 skb = mld_newpack(pmc->idev, dev->mtu);
1651 if (!skb)
1652 return NULL;
1653 pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
1654 pgr->grec_type = type;
1655 pgr->grec_auxwords = 0;
1656 pgr->grec_nsrcs = 0;
1657 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1658 pmr = (struct mld2_report *)skb_transport_header(skb);
1659 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1660 *ppgr = pgr;
1661 return skb;
1664 #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
1665 skb_tailroom(skb)) : 0)
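/* AVAILABLE(skb): bytes still usable in the current report skb -- the device
 * MTU minus the current length (or the tailroom before a device is set),
 * 0 if there is no skb yet.
 */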
1667 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1668 int type, int gdeleted, int sdeleted)
1670 struct inet6_dev *idev = pmc->idev;
1671 struct net_device *dev = idev->dev;
1672 struct mld2_report *pmr;
1673 struct mld2_grec *pgr = NULL;
1674 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1675 int scount, stotal, first, isquery, truncate;
1677 if (pmc->mca_flags & MAF_NOREPORT)
1678 return skb;
1680 isquery = type == MLD2_MODE_IS_INCLUDE ||
1681 type == MLD2_MODE_IS_EXCLUDE;
1682 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1683 type == MLD2_CHANGE_TO_EXCLUDE;
1685 stotal = scount = 0;
1687 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1689 if (!*psf_list)
1690 goto empty_source;
1692 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1694 /* EX and TO_EX get a fresh packet, if needed */
1695 if (truncate) {
1696 if (pmr && pmr->mld2r_ngrec &&
1697 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1698 if (skb)
1699 mld_sendpack(skb);
1700 skb = mld_newpack(idev, dev->mtu);
1703 first = 1;
1704 psf_prev = NULL;
1705 for (psf=*psf_list; psf; psf=psf_next) {
1706 struct in6_addr *psrc;
1708 psf_next = psf->sf_next;
1710 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
1711 psf_prev = psf;
1712 continue;
1715 /* clear marks on query responses */
1716 if (isquery)
1717 psf->sf_gsresp = 0;
1719 if (AVAILABLE(skb) < sizeof(*psrc) +
1720 first*sizeof(struct mld2_grec)) {
1721 if (truncate && !first)
1722 break; /* truncate these */
1723 if (pgr)
1724 pgr->grec_nsrcs = htons(scount);
1725 if (skb)
1726 mld_sendpack(skb);
1727 skb = mld_newpack(idev, dev->mtu);
1728 first = 1;
1729 scount = 0;
1731 if (first) {
1732 skb = add_grhead(skb, pmc, type, &pgr);
1733 first = 0;
1735 if (!skb)
1736 return NULL;
1737 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1738 *psrc = psf->sf_addr;
1739 scount++; stotal++;
1740 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1741 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1742 psf->sf_crcount--;
1743 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1744 if (psf_prev)
1745 psf_prev->sf_next = psf->sf_next;
1746 else
1747 *psf_list = psf->sf_next;
1748 kfree(psf);
1749 continue;
1752 psf_prev = psf;
1755 empty_source:
1756 if (!stotal) {
1757 if (type == MLD2_ALLOW_NEW_SOURCES ||
1758 type == MLD2_BLOCK_OLD_SOURCES)
1759 return skb;
1760 if (pmc->mca_crcount || isquery) {
1761 /* make sure we have room for group header */
1762 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1763 mld_sendpack(skb);
1764 skb = NULL; /* add_grhead will get a new one */
1766 skb = add_grhead(skb, pmc, type, &pgr);
1769 if (pgr)
1770 pgr->grec_nsrcs = htons(scount);
1772 if (isquery)
1773 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1774 return skb;
1777 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1779 struct sk_buff *skb = NULL;
1780 int type;
1782 read_lock_bh(&idev->lock);
1783 if (!pmc) {
1784 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1785 if (pmc->mca_flags & MAF_NOREPORT)
1786 continue;
1787 spin_lock_bh(&pmc->mca_lock);
1788 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1789 type = MLD2_MODE_IS_EXCLUDE;
1790 else
1791 type = MLD2_MODE_IS_INCLUDE;
1792 skb = add_grec(skb, pmc, type, 0, 0);
1793 spin_unlock_bh(&pmc->mca_lock);
1795 } else {
1796 spin_lock_bh(&pmc->mca_lock);
1797 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1798 type = MLD2_MODE_IS_EXCLUDE;
1799 else
1800 type = MLD2_MODE_IS_INCLUDE;
1801 skb = add_grec(skb, pmc, type, 0, 0);
1802 spin_unlock_bh(&pmc->mca_lock);
1804 read_unlock_bh(&idev->lock);
1805 if (skb)
1806 mld_sendpack(skb);
1810 * remove zero-count source records from a source filter list
1812 static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1814 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1816 psf_prev = NULL;
1817 for (psf=*ppsf; psf; psf = psf_next) {
1818 psf_next = psf->sf_next;
1819 if (psf->sf_crcount == 0) {
1820 if (psf_prev)
1821 psf_prev->sf_next = psf->sf_next;
1822 else
1823 *ppsf = psf->sf_next;
1824 kfree(psf);
1825 } else
1826 psf_prev = psf;
1830 static void mld_send_cr(struct inet6_dev *idev)
1832 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1833 struct sk_buff *skb = NULL;
1834 int type, dtype;
1836 read_lock_bh(&idev->lock);
1837 spin_lock(&idev->mc_lock);
1839 /* deleted MCA's */
1840 pmc_prev = NULL;
1841 for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
1842 pmc_next = pmc->next;
1843 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1844 type = MLD2_BLOCK_OLD_SOURCES;
1845 dtype = MLD2_BLOCK_OLD_SOURCES;
1846 skb = add_grec(skb, pmc, type, 1, 0);
1847 skb = add_grec(skb, pmc, dtype, 1, 1);
1849 if (pmc->mca_crcount) {
1850 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1851 type = MLD2_CHANGE_TO_INCLUDE;
1852 skb = add_grec(skb, pmc, type, 1, 0);
1854 pmc->mca_crcount--;
1855 if (pmc->mca_crcount == 0) {
1856 mld_clear_zeros(&pmc->mca_tomb);
1857 mld_clear_zeros(&pmc->mca_sources);
1860 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1861 !pmc->mca_sources) {
1862 if (pmc_prev)
1863 pmc_prev->next = pmc_next;
1864 else
1865 idev->mc_tomb = pmc_next;
1866 in6_dev_put(pmc->idev);
1867 kfree(pmc);
1868 } else
1869 pmc_prev = pmc;
1871 spin_unlock(&idev->mc_lock);
1873 /* change recs */
1874 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
1875 spin_lock_bh(&pmc->mca_lock);
1876 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1877 type = MLD2_BLOCK_OLD_SOURCES;
1878 dtype = MLD2_ALLOW_NEW_SOURCES;
1879 } else {
1880 type = MLD2_ALLOW_NEW_SOURCES;
1881 dtype = MLD2_BLOCK_OLD_SOURCES;
1883 skb = add_grec(skb, pmc, type, 0, 0);
1884 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
1886 /* filter mode changes */
1887 if (pmc->mca_crcount) {
1888 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1889 type = MLD2_CHANGE_TO_EXCLUDE;
1890 else
1891 type = MLD2_CHANGE_TO_INCLUDE;
1892 skb = add_grec(skb, pmc, type, 0, 0);
1893 pmc->mca_crcount--;
1895 spin_unlock_bh(&pmc->mca_lock);
1897 read_unlock_bh(&idev->lock);
1898 if (!skb)
1899 return;
1900 (void) mld_sendpack(skb);
1903 static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1905 struct net *net = dev_net(dev);
1906 struct sock *sk = net->ipv6.igmp_sk;
1907 struct inet6_dev *idev;
1908 struct sk_buff *skb;
1909 struct mld_msg *hdr;
1910 const struct in6_addr *snd_addr, *saddr;
1911 struct in6_addr addr_buf;
1912 int hlen = LL_RESERVED_SPACE(dev);
1913 int tlen = dev->needed_tailroom;
1914 int err, len, payload_len, full_len;
1915 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1916 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1917 IPV6_TLV_PADN, 0 };
1918 struct flowi6 fl6;
1919 struct dst_entry *dst;
1921 if (type == ICMPV6_MGM_REDUCTION)
1922 snd_addr = &in6addr_linklocal_allrouters;
1923 else
1924 snd_addr = addr;
1926 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1927 payload_len = len + sizeof(ra);
1928 full_len = sizeof(struct ipv6hdr) + payload_len;
1930 rcu_read_lock();
1931 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1932 IPSTATS_MIB_OUT, full_len);
1933 rcu_read_unlock();
1935 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
1937 if (skb == NULL) {
1938 rcu_read_lock();
1939 IP6_INC_STATS(net, __in6_dev_get(dev),
1940 IPSTATS_MIB_OUTDISCARDS);
1941 rcu_read_unlock();
1942 return;
1944 skb->priority = TC_PRIO_CONTROL;
1945 skb_reserve(skb, hlen);
1947 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1948 /* <draft-ietf-magma-mld-source-05.txt>:
1949 * use unspecified address as the source address
1950 * when a valid link-local address is not available.
1952 saddr = &in6addr_any;
1953 } else
1954 saddr = &addr_buf;
1956 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1958 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1960 hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1961 memset(hdr, 0, sizeof(struct mld_msg));
1962 hdr->mld_type = type;
1963 hdr->mld_mca = *addr;
1965 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1966 IPPROTO_ICMPV6,
1967 csum_partial(hdr, len, 0));
1969 rcu_read_lock();
1970 idev = __in6_dev_get(skb->dev);
1972 icmpv6_flow_init(sk, &fl6, type,
1973 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1974 skb->dev->ifindex);
1975 dst = icmp6_dst_alloc(skb->dev, &fl6);
1976 if (IS_ERR(dst)) {
1977 err = PTR_ERR(dst);
1978 goto err_out;
1981 skb_dst_set(skb, dst);
1982 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1983 dst_output);
1984 out:
1985 if (!err) {
1986 ICMP6MSGOUT_INC_STATS(net, idev, type);
1987 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1988 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1989 } else
1990 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1992 rcu_read_unlock();
1993 return;
1995 err_out:
1996 kfree_skb(skb);
1997 goto out;
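/* Re-send the current listener state: one MLDv1 report per group in v1 mode,
 * or a full MLDv2 report otherwise. Used after DAD completes, so reports that
 * may have gone out with the unspecified source are repeated from the
 * link-local address.
 */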
2000 static void mld_resend_report(struct inet6_dev *idev)
2002 if (mld_in_v1_mode(idev)) {
2003 struct ifmcaddr6 *mcaddr;
2004 read_lock_bh(&idev->lock);
2005 for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) {
2006 if (!(mcaddr->mca_flags & MAF_NOREPORT))
2007 igmp6_send(&mcaddr->mca_addr, idev->dev,
2008 ICMPV6_MGM_REPORT);
2010 read_unlock_bh(&idev->lock);
2011 } else {
2012 mld_send_report(idev, NULL);
2016 void ipv6_mc_dad_complete(struct inet6_dev *idev)
2018 idev->mc_dad_count = idev->mc_qrv;
2019 if (idev->mc_dad_count) {
2020 mld_resend_report(idev);
2021 idev->mc_dad_count--;
2022 if (idev->mc_dad_count)
2023 mld_dad_start_timer(idev, idev->mc_maxdelay);
2027 static void mld_dad_timer_expire(unsigned long data)
2029 struct inet6_dev *idev = (struct inet6_dev *)data;
2031 mld_resend_report(idev);
2032 if (idev->mc_dad_count) {
2033 idev->mc_dad_count--;
2034 if (idev->mc_dad_count)
2035 mld_dad_start_timer(idev, idev->mc_maxdelay);
2037 in6_dev_put(idev);
2040 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2041 const struct in6_addr *psfsrc)
2043 struct ip6_sf_list *psf, *psf_prev;
2044 int rv = 0;
2046 psf_prev = NULL;
2047 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2048 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2049 break;
2050 psf_prev = psf;
2052 if (!psf || psf->sf_count[sfmode] == 0) {
2053 /* source filter not found, or count wrong => bug */
2054 return -ESRCH;
2056 psf->sf_count[sfmode]--;
2057 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2058 struct inet6_dev *idev = pmc->idev;
2060 /* no more filters for this source */
2061 if (psf_prev)
2062 psf_prev->sf_next = psf->sf_next;
2063 else
2064 pmc->mca_sources = psf->sf_next;
2065 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2066 !mld_in_v1_mode(idev)) {
2067 psf->sf_crcount = idev->mc_qrv;
2068 psf->sf_next = pmc->mca_tomb;
2069 pmc->mca_tomb = psf;
2070 rv = 1;
2071 } else
2072 kfree(psf);
2074 return rv;
2077 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2078 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2079 int delta)
2081 struct ifmcaddr6 *pmc;
2082 int changerec = 0;
2083 int i, err;
2085 if (!idev)
2086 return -ENODEV;
2087 read_lock_bh(&idev->lock);
2088 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2089 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2090 break;
2092 if (!pmc) {
2093 /* MCA not found?? bug */
2094 read_unlock_bh(&idev->lock);
2095 return -ESRCH;
2097 spin_lock_bh(&pmc->mca_lock);
2098 sf_markstate(pmc);
2099 if (!delta) {
2100 if (!pmc->mca_sfcount[sfmode]) {
2101 spin_unlock_bh(&pmc->mca_lock);
2102 read_unlock_bh(&idev->lock);
2103 return -EINVAL;
2105 pmc->mca_sfcount[sfmode]--;
2107 err = 0;
2108 for (i=0; i<sfcount; i++) {
2109 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2111 changerec |= rv > 0;
2112 if (!err && rv < 0)
2113 err = rv;
2115 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2116 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2117 pmc->mca_sfcount[MCAST_INCLUDE]) {
2118 struct ip6_sf_list *psf;
2120 /* filter mode change */
2121 pmc->mca_sfmode = MCAST_INCLUDE;
2122 pmc->mca_crcount = idev->mc_qrv;
2123 idev->mc_ifc_count = pmc->mca_crcount;
2124 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2125 psf->sf_crcount = 0;
2126 mld_ifc_event(pmc->idev);
2127 } else if (sf_setstate(pmc) || changerec)
2128 mld_ifc_event(pmc->idev);
2129 spin_unlock_bh(&pmc->mca_lock);
2130 read_unlock_bh(&idev->lock);
2131 return err;
2135 * Add multicast single-source filter to the interface list
2137 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2138 const struct in6_addr *psfsrc)
2140 struct ip6_sf_list *psf, *psf_prev;
2142 psf_prev = NULL;
2143 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2144 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2145 break;
2146 psf_prev = psf;
2148 if (!psf) {
2149 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2150 if (!psf)
2151 return -ENOBUFS;
2153 psf->sf_addr = *psfsrc;
2154 if (psf_prev) {
2155 psf_prev->sf_next = psf;
2156 } else
2157 pmc->mca_sources = psf;
2159 psf->sf_count[sfmode]++;
2160 return 0;
2163 static void sf_markstate(struct ifmcaddr6 *pmc)
2165 struct ip6_sf_list *psf;
2166 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2168 for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
2169 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2170 psf->sf_oldin = mca_xcount ==
2171 psf->sf_count[MCAST_EXCLUDE] &&
2172 !psf->sf_count[MCAST_INCLUDE];
2173 } else
2174 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2177 static int sf_setstate(struct ifmcaddr6 *pmc)
2179 struct ip6_sf_list *psf, *dpsf;
2180 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2181 int qrv = pmc->idev->mc_qrv;
2182 int new_in, rv;
2184 rv = 0;
2185 for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
2186 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2187 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2188 !psf->sf_count[MCAST_INCLUDE];
2189 } else
2190 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2191 if (new_in) {
2192 if (!psf->sf_oldin) {
2193 struct ip6_sf_list *prev = NULL;
2195 for (dpsf=pmc->mca_tomb; dpsf;
2196 dpsf=dpsf->sf_next) {
2197 if (ipv6_addr_equal(&dpsf->sf_addr,
2198 &psf->sf_addr))
2199 break;
2200 prev = dpsf;
2202 if (dpsf) {
2203 if (prev)
2204 prev->sf_next = dpsf->sf_next;
2205 else
2206 pmc->mca_tomb = dpsf->sf_next;
2207 kfree(dpsf);
2209 psf->sf_crcount = qrv;
2210 rv++;
2212 } else if (psf->sf_oldin) {
2213 psf->sf_crcount = 0;
2214 /*
2215 * add or update "delete" records if an active filter
2216 * is now inactive
2217 */
2218 for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
2219 if (ipv6_addr_equal(&dpsf->sf_addr,
2220 &psf->sf_addr))
2221 break;
2222 if (!dpsf) {
2223 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2224 if (!dpsf)
2225 continue;
2226 *dpsf = *psf;
2227 /* pmc->mca_lock held by callers */
2228 dpsf->sf_next = pmc->mca_tomb;
2229 pmc->mca_tomb = dpsf;
2231 dpsf->sf_crcount = qrv;
2232 rv++;
2235 return rv;
2236 }
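/*
 * sf_setstate() above recomputes the same per-source predicate after the
 * change and compares it with sf_oldin.  A source that just entered the
 * filter list gets sf_crcount = qrv so it is carried in the next qrv
 * state-change reports (and any stale entry for it is removed from
 * mca_tomb); a source that just left the list gets, or refreshes, a tomb
 * record with sf_crcount = qrv instead.  The return value tells the caller
 * whether anything changed at all.
 */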
2238 /*
2239 * Add multicast source filter list to the interface list
2240 */
2241 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2242 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2243 int delta)
2245 struct ifmcaddr6 *pmc;
2246 int isexclude;
2247 int i, err;
2249 if (!idev)
2250 return -ENODEV;
2251 read_lock_bh(&idev->lock);
2252 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2253 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2254 break;
2256 if (!pmc) {
2257 /* MCA not found?? bug */
2258 read_unlock_bh(&idev->lock);
2259 return -ESRCH;
2261 spin_lock_bh(&pmc->mca_lock);
2263 sf_markstate(pmc);
2264 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2265 if (!delta)
2266 pmc->mca_sfcount[sfmode]++;
2267 err = 0;
2268 for (i=0; i<sfcount; i++) {
2269 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2270 if (err)
2271 break;
2273 if (err) {
2274 int j;
2276 if (!delta)
2277 pmc->mca_sfcount[sfmode]--;
2278 for (j=0; j<i; j++)
2279 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2280 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2281 struct ip6_sf_list *psf;
2283 /* filter mode change */
2284 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2285 pmc->mca_sfmode = MCAST_EXCLUDE;
2286 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2287 pmc->mca_sfmode = MCAST_INCLUDE;
2288 /* else no filters; keep old mode for reports */
2290 pmc->mca_crcount = idev->mc_qrv;
2291 idev->mc_ifc_count = pmc->mca_crcount;
2292 for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
2293 psf->sf_crcount = 0;
2294 mld_ifc_event(idev);
2295 } else if (sf_setstate(pmc))
2296 mld_ifc_event(idev);
2297 spin_unlock_bh(&pmc->mca_lock);
2298 read_unlock_bh(&idev->lock);
2299 return err;
2300 }
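Both ip6_mc_add_src() and ip6_mc_del_src() are driven from the
source-specific multicast setsockopt() handlers earlier in this file
(ip6_mc_source() and ip6_mc_msfilter()).  A minimal user-space sketch of the
API that ends up here; the interface name and addresses are placeholders and
error handling is omitted:

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Join ff15::1234 on an IPv6 socket, accepting traffic from 2001:db8::1
 * only; the kernel-side counterpart is ip6_mc_add_src() in INCLUDE mode. */
static int join_ssm_group(int sock)
{
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = if_nametoindex("eth0");	/* placeholder */
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff15::1234", &grp->sin6_addr);
	src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);

	return setsockopt(sock, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
			  &gsr, sizeof(gsr));
}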
2302 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2304 struct ip6_sf_list *psf, *nextpsf;
2306 for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
2307 nextpsf = psf->sf_next;
2308 kfree(psf);
2310 pmc->mca_tomb = NULL;
2311 for (psf=pmc->mca_sources; psf; psf=nextpsf) {
2312 nextpsf = psf->sf_next;
2313 kfree(psf);
2315 pmc->mca_sources = NULL;
2316 pmc->mca_sfmode = MCAST_EXCLUDE;
2317 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2318 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
2319 }
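/*
 * ip6_mc_clear_src() above frees both the tomb list and the active source
 * list and resets the record to EXCLUDE mode with an empty source list,
 * i.e. the "accept every source" state that a plain any-source join uses.
 */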
2322 static void igmp6_join_group(struct ifmcaddr6 *ma)
2324 unsigned long delay;
2326 if (ma->mca_flags & MAF_NOREPORT)
2327 return;
2329 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2331 delay = net_random() % unsolicited_report_interval(ma->idev);
2333 spin_lock_bh(&ma->mca_lock);
2334 if (del_timer(&ma->mca_timer)) {
2335 atomic_dec(&ma->mca_refcnt);
2336 delay = ma->mca_timer.expires - jiffies;
2339 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2340 atomic_inc(&ma->mca_refcnt);
2341 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2342 spin_unlock_bh(&ma->mca_lock);
2343 }
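/*
 * igmp6_join_group() above sends one unsolicited Report (ICMPV6_MGM_REPORT)
 * right away and then arms the group timer with a random delay within the
 * unsolicited report interval, so a second copy is sent in case the first
 * is lost; groups flagged MAF_NOREPORT are never reported at all.
 * MAF_LAST_REPORTER is set because this node has just reported the group.
 */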
2345 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2346 struct inet6_dev *idev)
2348 int err;
2350 /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
2351 * so no other readers or writers of iml or its sflist
2352 */
2353 if (!iml->sflist) {
2354 /* any-source empty exclude case */
2355 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2357 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2358 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2359 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2360 iml->sflist = NULL;
2361 return err;
2364 static void igmp6_leave_group(struct ifmcaddr6 *ma)
2366 if (mld_in_v1_mode(ma->idev)) {
2367 if (ma->mca_flags & MAF_LAST_REPORTER)
2368 igmp6_send(&ma->mca_addr, ma->idev->dev,
2369 ICMPV6_MGM_REDUCTION);
2370 } else {
2371 mld_add_delrec(ma->idev, ma);
2372 mld_ifc_event(ma->idev);
2373 }
2374 }
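/*
 * igmp6_leave_group() above: in MLDv1 compatibility mode a Done message
 * (ICMPV6_MGM_REDUCTION) is sent, but only if this node was the last to
 * report the group; in MLDv2 mode the group is queued as a pending "delete"
 * record and an interface filter-change report is scheduled instead.
 */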
2376 static void mld_gq_timer_expire(unsigned long data)
2378 struct inet6_dev *idev = (struct inet6_dev *)data;
2380 idev->mc_gq_running = 0;
2381 mld_send_report(idev, NULL);
2382 in6_dev_put(idev);
2385 static void mld_ifc_timer_expire(unsigned long data)
2387 struct inet6_dev *idev = (struct inet6_dev *)data;
2389 mld_send_cr(idev);
2390 if (idev->mc_ifc_count) {
2391 idev->mc_ifc_count--;
2392 if (idev->mc_ifc_count)
2393 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2395 in6_dev_put(idev);
2398 static void mld_ifc_event(struct inet6_dev *idev)
2400 if (mld_in_v1_mode(idev))
2401 return;
2402 idev->mc_ifc_count = idev->mc_qrv;
2403 mld_ifc_start_timer(idev, 1);
2404 }
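/*
 * mld_ifc_event() above kicks the interface (filter) change report
 * machinery: it is a no-op in MLDv1 compatibility mode, otherwise it loads
 * mc_ifc_count with the robustness value and starts the ifc timer almost
 * immediately; mld_ifc_timer_expire() then sends the change records and
 * re-arms itself until the count drains.
 */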
2407 static void igmp6_timer_handler(unsigned long data)
2409 struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
2411 if (mld_in_v1_mode(ma->idev))
2412 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2413 else
2414 mld_send_report(ma->idev, ma);
2416 spin_lock(&ma->mca_lock);
2417 ma->mca_flags |= MAF_LAST_REPORTER;
2418 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2419 spin_unlock(&ma->mca_lock);
2420 ma_put(ma);
2423 /* Device changing type */
2425 void ipv6_mc_unmap(struct inet6_dev *idev)
2427 struct ifmcaddr6 *i;
2429 /* Drop the installed multicast list; the device type is changing */
2431 read_lock_bh(&idev->lock);
2432 for (i = idev->mc_list; i; i = i->next)
2433 igmp6_group_dropped(i);
2434 read_unlock_bh(&idev->lock);
2437 void ipv6_mc_remap(struct inet6_dev *idev)
2439 ipv6_mc_up(idev);
2442 /* Device going down */
2444 void ipv6_mc_down(struct inet6_dev *idev)
2446 struct ifmcaddr6 *i;
2448 /* Withdraw multicast list */
2450 read_lock_bh(&idev->lock);
2451 mld_ifc_stop_timer(idev);
2452 mld_gq_stop_timer(idev);
2453 mld_dad_stop_timer(idev);
2455 for (i = idev->mc_list; i; i=i->next)
2456 igmp6_group_dropped(i);
2457 read_unlock_bh(&idev->lock);
2459 mld_clear_delrec(idev);
2463 /* Device going up */
2465 void ipv6_mc_up(struct inet6_dev *idev)
2467 struct ifmcaddr6 *i;
2469 /* Install multicast list, except for all-nodes (already installed) */
2471 read_lock_bh(&idev->lock);
2472 for (i = idev->mc_list; i; i=i->next)
2473 igmp6_group_added(i);
2474 read_unlock_bh(&idev->lock);
2477 /* IPv6 device initialization. */
2479 void ipv6_mc_init_dev(struct inet6_dev *idev)
2481 write_lock_bh(&idev->lock);
2482 spin_lock_init(&idev->mc_lock);
2483 idev->mc_gq_running = 0;
2484 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2485 (unsigned long)idev);
2486 idev->mc_tomb = NULL;
2487 idev->mc_ifc_count = 0;
2488 setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
2489 (unsigned long)idev);
2490 setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
2491 (unsigned long)idev);
2493 idev->mc_qrv = MLD_QRV_DEFAULT;
2494 idev->mc_qi = MLD_QI_DEFAULT;
2495 idev->mc_qri = MLD_QRI_DEFAULT;
2497 idev->mc_maxdelay = unsolicited_report_interval(idev);
2498 idev->mc_v1_seen = 0;
2499 write_unlock_bh(&idev->lock);
2500 }
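/*
 * ipv6_mc_init_dev() above seeds the per-interface MLD parameters from the
 * compile-time defaults (MLD_QRV_DEFAULT and friends); these presumably
 * match the RFC 3810 defaults of robustness variable 2, query interval
 * 125 s and query response interval 10 s, and may later be adjusted from
 * the parameters advertised in received queries.
 */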
2502 /*
2503 * Device is about to be destroyed: clean up.
2504 */
2506 void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2508 struct ifmcaddr6 *i;
2510 /* Deactivate timers */
2511 ipv6_mc_down(idev);
2513 /* Delete all-nodes address. */
2514 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2515 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2516 * fail.
2517 */
2518 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2520 if (idev->cnf.forwarding)
2521 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2523 write_lock_bh(&idev->lock);
2524 while ((i = idev->mc_list) != NULL) {
2525 idev->mc_list = i->next;
2526 write_unlock_bh(&idev->lock);
2528 igmp6_group_dropped(i);
2529 ma_put(i);
2531 write_lock_bh(&idev->lock);
2532 }
2533 write_unlock_bh(&idev->lock);
2534 }
2536 #ifdef CONFIG_PROC_FS
2537 struct igmp6_mc_iter_state {
2538 struct seq_net_private p;
2539 struct net_device *dev;
2540 struct inet6_dev *idev;
2541 };
2543 #define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2545 static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2547 struct ifmcaddr6 *im = NULL;
2548 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2549 struct net *net = seq_file_net(seq);
2551 state->idev = NULL;
2552 for_each_netdev_rcu(net, state->dev) {
2553 struct inet6_dev *idev;
2554 idev = __in6_dev_get(state->dev);
2555 if (!idev)
2556 continue;
2557 read_lock_bh(&idev->lock);
2558 im = idev->mc_list;
2559 if (im) {
2560 state->idev = idev;
2561 break;
2563 read_unlock_bh(&idev->lock);
2565 return im;
2568 static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2570 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2572 im = im->next;
2573 while (!im) {
2574 if (likely(state->idev != NULL))
2575 read_unlock_bh(&state->idev->lock);
2577 state->dev = next_net_device_rcu(state->dev);
2578 if (!state->dev) {
2579 state->idev = NULL;
2580 break;
2582 state->idev = __in6_dev_get(state->dev);
2583 if (!state->idev)
2584 continue;
2585 read_lock_bh(&state->idev->lock);
2586 im = state->idev->mc_list;
2588 return im;
2589 }
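/*
 * The igmp6_mc_* iterator above (and the igmp6_mcf_* variant further down)
 * walks every device under rcu_read_lock(), taken in the ->start callback,
 * holds the current inet6_dev's read lock while its mc_list is traversed
 * and drops it when moving on to the next device; ->stop releases whatever
 * is still held.  The mcfilter6 iterator additionally holds the current
 * group's mca_lock while its source list is walked.
 */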
2591 static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2593 struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2594 if (im)
2595 while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2596 --pos;
2597 return pos ? NULL : im;
2600 static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2601 __acquires(RCU)
2603 rcu_read_lock();
2604 return igmp6_mc_get_idx(seq, *pos);
2607 static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2609 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2611 ++*pos;
2612 return im;
2615 static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2616 __releases(RCU)
2618 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2620 if (likely(state->idev != NULL)) {
2621 read_unlock_bh(&state->idev->lock);
2622 state->idev = NULL;
2624 state->dev = NULL;
2625 rcu_read_unlock();
2628 static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2630 struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2631 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2633 seq_printf(seq,
2634 "%-4d %-15s %pi6 %5d %08X %ld\n",
2635 state->dev->ifindex, state->dev->name,
2636 &im->mca_addr,
2637 im->mca_users, im->mca_flags,
2638 (im->mca_flags&MAF_TIMER_RUNNING) ?
2639 jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
2640 return 0;
2641 }
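For reference, each /proc/net/igmp6 line produced by the format string above
carries the interface index, device name, the group address as raw hex
(%pi6 prints it without colons), the local user count, the flag word and the
remaining timer ticks.  An illustrative, made-up line:

1    lo              ff020000000000000000000000000001     1 0000000C 0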
2643 static const struct seq_operations igmp6_mc_seq_ops = {
2644 .start = igmp6_mc_seq_start,
2645 .next = igmp6_mc_seq_next,
2646 .stop = igmp6_mc_seq_stop,
2647 .show = igmp6_mc_seq_show,
2648 };
2650 static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2652 return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2653 sizeof(struct igmp6_mc_iter_state));
2656 static const struct file_operations igmp6_mc_seq_fops = {
2657 .owner = THIS_MODULE,
2658 .open = igmp6_mc_seq_open,
2659 .read = seq_read,
2660 .llseek = seq_lseek,
2661 .release = seq_release_net,
2662 };
2664 struct igmp6_mcf_iter_state {
2665 struct seq_net_private p;
2666 struct net_device *dev;
2667 struct inet6_dev *idev;
2668 struct ifmcaddr6 *im;
2669 };
2671 #define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
2673 static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2675 struct ip6_sf_list *psf = NULL;
2676 struct ifmcaddr6 *im = NULL;
2677 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2678 struct net *net = seq_file_net(seq);
2680 state->idev = NULL;
2681 state->im = NULL;
2682 for_each_netdev_rcu(net, state->dev) {
2683 struct inet6_dev *idev;
2684 idev = __in6_dev_get(state->dev);
2685 if (unlikely(idev == NULL))
2686 continue;
2687 read_lock_bh(&idev->lock);
2688 im = idev->mc_list;
2689 if (likely(im != NULL)) {
2690 spin_lock_bh(&im->mca_lock);
2691 psf = im->mca_sources;
2692 if (likely(psf != NULL)) {
2693 state->im = im;
2694 state->idev = idev;
2695 break;
2697 spin_unlock_bh(&im->mca_lock);
2699 read_unlock_bh(&idev->lock);
2701 return psf;
2704 static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
2706 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2708 psf = psf->sf_next;
2709 while (!psf) {
2710 spin_unlock_bh(&state->im->mca_lock);
2711 state->im = state->im->next;
2712 while (!state->im) {
2713 if (likely(state->idev != NULL))
2714 read_unlock_bh(&state->idev->lock);
2716 state->dev = next_net_device_rcu(state->dev);
2717 if (!state->dev) {
2718 state->idev = NULL;
2719 goto out;
2721 state->idev = __in6_dev_get(state->dev);
2722 if (!state->idev)
2723 continue;
2724 read_lock_bh(&state->idev->lock);
2725 state->im = state->idev->mc_list;
2727 if (!state->im)
2728 break;
2729 spin_lock_bh(&state->im->mca_lock);
2730 psf = state->im->mca_sources;
2732 out:
2733 return psf;
2736 static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2738 struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
2739 if (psf)
2740 while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
2741 --pos;
2742 return pos ? NULL : psf;
2745 static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2746 __acquires(RCU)
2748 rcu_read_lock();
2749 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2752 static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2754 struct ip6_sf_list *psf;
2755 if (v == SEQ_START_TOKEN)
2756 psf = igmp6_mcf_get_first(seq);
2757 else
2758 psf = igmp6_mcf_get_next(seq, v);
2759 ++*pos;
2760 return psf;
2763 static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2764 __releases(RCU)
2766 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2767 if (likely(state->im != NULL)) {
2768 spin_unlock_bh(&state->im->mca_lock);
2769 state->im = NULL;
2771 if (likely(state->idev != NULL)) {
2772 read_unlock_bh(&state->idev->lock);
2773 state->idev = NULL;
2775 state->dev = NULL;
2776 rcu_read_unlock();
2779 static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2781 struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
2782 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2784 if (v == SEQ_START_TOKEN) {
2785 seq_printf(seq,
2786 "%3s %6s "
2787 "%32s %32s %6s %6s\n", "Idx",
2788 "Device", "Multicast Address",
2789 "Source Address", "INC", "EXC");
2790 } else {
2791 seq_printf(seq,
2792 "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
2793 state->dev->ifindex, state->dev->name,
2794 &state->im->mca_addr,
2795 &psf->sf_addr,
2796 psf->sf_count[MCAST_INCLUDE],
2797 psf->sf_count[MCAST_EXCLUDE]);
2799 return 0;
2800 }
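Likewise, /proc/net/mcfilter6 prints the header above followed by one line
per (group, source) pair.  An illustrative, made-up entry (column spacing
approximate):

Idx Device                Multicast Address                   Source Address    INC    EXC
  2   eth0 ff150000000000000000000000001234 20010db8000000000000000000000001      1      0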
2802 static const struct seq_operations igmp6_mcf_seq_ops = {
2803 .start = igmp6_mcf_seq_start,
2804 .next = igmp6_mcf_seq_next,
2805 .stop = igmp6_mcf_seq_stop,
2806 .show = igmp6_mcf_seq_show,
2807 };
2809 static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2811 return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2812 sizeof(struct igmp6_mcf_iter_state));
2815 static const struct file_operations igmp6_mcf_seq_fops = {
2816 .owner = THIS_MODULE,
2817 .open = igmp6_mcf_seq_open,
2818 .read = seq_read,
2819 .llseek = seq_lseek,
2820 .release = seq_release_net,
2821 };
2823 static int __net_init igmp6_proc_init(struct net *net)
2825 int err;
2827 err = -ENOMEM;
2828 if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
2829 goto out;
2830 if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
2831 &igmp6_mcf_seq_fops))
2832 goto out_proc_net_igmp6;
2834 err = 0;
2835 out:
2836 return err;
2838 out_proc_net_igmp6:
2839 remove_proc_entry("igmp6", net->proc_net);
2840 goto out;
2843 static void __net_exit igmp6_proc_exit(struct net *net)
2845 remove_proc_entry("mcfilter6", net->proc_net);
2846 remove_proc_entry("igmp6", net->proc_net);
2848 #else
2849 static inline int igmp6_proc_init(struct net *net)
2850 {
2851 return 0;
2852 }
2853 static inline void igmp6_proc_exit(struct net *net)
2854 {
2855 }
2856 #endif
2858 static int __net_init igmp6_net_init(struct net *net)
2860 int err;
2862 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2863 SOCK_RAW, IPPROTO_ICMPV6, net);
2864 if (err < 0) {
2865 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2866 err);
2867 goto out;
2870 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2872 err = igmp6_proc_init(net);
2873 if (err)
2874 goto out_sock_create;
2875 out:
2876 return err;
2878 out_sock_create:
2879 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2880 goto out;
2881 }
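/*
 * The per-namespace control socket created above is the one MLD packets are
 * sent through; its hop limit is pinned to 1 because MLD messages must
 * never be forwarded off-link (RFC 2710/3810).
 */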
2883 static void __net_exit igmp6_net_exit(struct net *net)
2885 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2886 igmp6_proc_exit(net);
2889 static struct pernet_operations igmp6_net_ops = {
2890 .init = igmp6_net_init,
2891 .exit = igmp6_net_exit,
2892 };
2894 int __init igmp6_init(void)
2896 return register_pernet_subsys(&igmp6_net_ops);
2899 void igmp6_cleanup(void)
2901 unregister_pernet_subsys(&igmp6_net_ops);
2902 }