/*
 * net/tipc/name_table.c: TIPC name table code
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <net/sock.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include <net/genetlink.h>

#define TIPC_NAMETBL_SIZE 1024	/* must be a power of 2 */
/**
 * struct name_info - name sequence publication info
 * @node_list: circular list of publications made by own node
 * @cluster_list: circular list of publications made by own cluster
 * @zone_list: circular list of publications made by own zone
 * @node_list_size: number of entries in "node_list"
 * @cluster_list_size: number of entries in "cluster_list"
 * @zone_list_size: number of entries in "zone_list"
 *
 * Note: The zone list always contains at least one entry, since all
 *       publications of the associated name sequence belong to it.
 *       (The cluster and node lists may be empty.)
 */
struct name_info {
        struct list_head node_list;
        struct list_head cluster_list;
        struct list_head zone_list;
        u32 node_list_size;
        u32 cluster_list_size;
        u32 zone_list_size;
};
/**
 * struct sub_seq - container for all published instances of a name sequence
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @info: pointer to name sequence publication info
 */
struct sub_seq {
        u32 lower;
        u32 upper;
        struct name_info *info;
};
/**
 * struct name_seq - container for all published instances of a name type
 * @type: 32 bit 'type' value for name sequence
 * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
 *         sub-sequences are sorted in ascending order
 * @alloc: number of sub-sequences currently in array
 * @first_free: array index of first unused sub-sequence entry
 * @ns_list: links to adjacent name sequences in hash chain
 * @subscriptions: list of subscriptions for this 'type'
 * @lock: spinlock controlling access to publication lists of all sub-sequences
 * @rcu: RCU callback head used for deferred freeing
 */
struct name_seq {
        u32 type;
        struct sub_seq *sseqs;
        u32 alloc;
        u32 first_free;
        struct hlist_node ns_list;
        struct list_head subscriptions;
        spinlock_t lock;
        struct rcu_head rcu;
};
static int hash(int x)
{
        return x & (TIPC_NAMETBL_SIZE - 1);
}
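/*
 * Illustrative note (not part of the original file): because
 * TIPC_NAMETBL_SIZE is a power of 2, the mask above is equivalent to a
 * modulo operation, e.g. with the table size of 1024:
 *
 *      hash(1000) == 1000 & 1023 == 1000
 *      hash(4097) == 4097 & 1023 ==    1
 */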
/**
 * publ_create - create a publication structure
 */
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
                                       u32 scope, u32 node, u32 port_ref,
                                       u32 key)
{
        struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
        if (publ == NULL) {
                pr_warn("Publication creation failure, no memory\n");
                return NULL;
        }

        publ->type = type;
        publ->lower = lower;
        publ->upper = upper;
        publ->scope = scope;
        publ->node = node;
        publ->ref = port_ref;
        publ->key = key;
        INIT_LIST_HEAD(&publ->pport_list);
        return publ;
}
/**
 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
 */
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
        return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}
/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates a single sub-sequence structure and sets it to all 0's.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
        struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
        struct sub_seq *sseq = tipc_subseq_alloc(1);

        if (!nseq || !sseq) {
                pr_warn("Name sequence creation failed, no memory\n");
                kfree(nseq);
                kfree(sseq);
                return NULL;
        }

        spin_lock_init(&nseq->lock);
        nseq->type = type;
        nseq->sseqs = sseq;
        nseq->alloc = 1;
        INIT_HLIST_NODE(&nseq->ns_list);
        INIT_LIST_HEAD(&nseq->subscriptions);
        hlist_add_head_rcu(&nseq->ns_list, seq_head);
        return nseq;
}
/**
 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
 *
 * Very time-critical, so binary searches through sub-sequence array.
 */
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
                                           u32 instance)
{
        struct sub_seq *sseqs = nseq->sseqs;
        int low = 0;
        int high = nseq->first_free - 1;
        int mid;

        while (low <= high) {
                mid = (low + high) / 2;
                if (instance < sseqs[mid].lower)
                        high = mid - 1;
                else if (instance > sseqs[mid].upper)
                        low = mid + 1;
                else
                        return &sseqs[mid];
        }
        return NULL;
}
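/*
 * Illustrative sketch (not part of the original file): with sub-sequences
 * covering {100..199} and {300..399}, the binary search above behaves as:
 *
 *      nameseq_find_subseq(nseq, 150);   // returns &sseqs[0]
 *      nameseq_find_subseq(nseq, 350);   // returns &sseqs[1]
 *      nameseq_find_subseq(nseq, 250);   // returns NULL (no range matches)
 */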
/**
 * nameseq_locate_subseq - determine position of name instance in sub-sequence
 *
 * Returns index in sub-sequence array of the entry that contains the specified
 * instance value; if no entry contains that value, returns the position
 * where a new entry for it would be inserted in the array.
 *
 * Note: Similar to binary search code for locating a sub-sequence.
 */
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
        struct sub_seq *sseqs = nseq->sseqs;
        int low = 0;
        int high = nseq->first_free - 1;
        int mid;

        while (low <= high) {
                mid = (low + high) / 2;
                if (instance < sseqs[mid].lower)
                        high = mid - 1;
                else if (instance > sseqs[mid].upper)
                        low = mid + 1;
                else
                        return mid;
        }
        return low;
}
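/*
 * Illustrative sketch (not part of the original file): unlike
 * nameseq_find_subseq(), this helper also yields an insertion point when no
 * range matches.  With sub-sequences {100..199} and {300..399}:
 *
 *      nameseq_locate_subseq(nseq, 150);  // returns 0 (inside first range)
 *      nameseq_locate_subseq(nseq, 250);  // returns 1 (insert before {300..399})
 *      nameseq_locate_subseq(nseq, 450);  // returns 2 (append at the end)
 */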
/**
 * tipc_nameseq_insert_publ
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
                                                    struct name_seq *nseq,
                                                    u32 type, u32 lower,
                                                    u32 upper, u32 scope,
                                                    u32 node, u32 port, u32 key)
{
        struct tipc_subscription *s;
        struct tipc_subscription *st;
        struct publication *publ;
        struct sub_seq *sseq;
        struct name_info *info;
        int created_subseq = 0;

        sseq = nameseq_find_subseq(nseq, lower);
        if (sseq) {

                /* Lower end overlaps existing entry => need an exact match */
                if ((sseq->lower != lower) || (sseq->upper != upper)) {
                        return NULL;
                }

                info = sseq->info;

                /* Check if an identical publication already exists */
                list_for_each_entry(publ, &info->zone_list, zone_list) {
                        if ((publ->ref == port) && (publ->key == key) &&
                            (!publ->node || (publ->node == node)))
                                return NULL;
                }
        } else {
                u32 inspos;
                struct sub_seq *freesseq;

                /* Find where lower end should be inserted */
                inspos = nameseq_locate_subseq(nseq, lower);

                /* Fail if upper end overlaps into an existing entry */
                if ((inspos < nseq->first_free) &&
                    (upper >= nseq->sseqs[inspos].lower)) {
                        return NULL;
                }

                /* Ensure there is space for new sub-sequence */
                if (nseq->first_free == nseq->alloc) {
                        struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

                        if (!sseqs) {
                                pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
                                        type, lower, upper);
                                return NULL;
                        }
                        memcpy(sseqs, nseq->sseqs,
                               nseq->alloc * sizeof(struct sub_seq));
                        kfree(nseq->sseqs);
                        nseq->sseqs = sseqs;
                        nseq->alloc *= 2;
                }

                info = kzalloc(sizeof(*info), GFP_ATOMIC);
                if (!info) {
                        pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
                                type, lower, upper);
                        return NULL;
                }

                INIT_LIST_HEAD(&info->node_list);
                INIT_LIST_HEAD(&info->cluster_list);
                INIT_LIST_HEAD(&info->zone_list);

                /* Insert new sub-sequence */
                sseq = &nseq->sseqs[inspos];
                freesseq = &nseq->sseqs[nseq->first_free];
                memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
                memset(sseq, 0, sizeof(*sseq));
                nseq->first_free++;
                sseq->lower = lower;
                sseq->upper = upper;
                sseq->info = info;
                created_subseq = 1;
        }

        /* Insert a publication */
        publ = publ_create(type, lower, upper, scope, node, port, key);
        if (!publ)
                return NULL;

        list_add(&publ->zone_list, &info->zone_list);
        info->zone_list_size++;

        if (in_own_cluster(net, node)) {
                list_add(&publ->cluster_list, &info->cluster_list);
                info->cluster_list_size++;
        }

        if (in_own_node(net, node)) {
                list_add(&publ->node_list, &info->node_list);
                info->node_list_size++;
        }

        /* Any subscriptions waiting for notification? */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
                tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
                                            TIPC_PUBLISHED, publ->ref,
                                            publ->node, created_subseq);
        }
        return publ;
}
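/*
 * Illustrative sketch (not part of the original file): the overlap rules
 * enforced above mean that, once {100..199} exists for a given type, a
 * further publication is accepted only if it covers exactly the same range
 * (with a distinct port/key) or a disjoint one:
 *
 *      tipc_nameseq_insert_publ(net, nseq, type, 100, 199, ...);  // ok, exact match
 *      tipc_nameseq_insert_publ(net, nseq, type, 150, 250, ...);  // rejected, partial overlap
 *      tipc_nameseq_insert_publ(net, nseq, type, 200, 299, ...);  // ok, disjoint range
 */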
/**
 * tipc_nameseq_remove_publ
 *
 * NOTE: There may be cases where TIPC is asked to remove a publication
 * that is not in the name table.  For example, if another node issues a
 * publication for a name sequence that overlaps an existing name sequence
 * the publication will not be recorded, which means the publication won't
 * be found when the name sequence is later withdrawn by that node.
 * A failed withdraw request simply returns a failure indication and lets the
 * caller issue any error or warning messages associated with such a problem.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
                                                    struct name_seq *nseq,
                                                    u32 inst, u32 node,
                                                    u32 ref, u32 key)
{
        struct publication *publ;
        struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
        struct name_info *info;
        struct sub_seq *free;
        struct tipc_subscription *s, *st;
        int removed_subseq = 0;

        if (!sseq)
                return NULL;

        info = sseq->info;

        /* Locate publication, if it exists */
        list_for_each_entry(publ, &info->zone_list, zone_list) {
                if ((publ->key == key) && (publ->ref == ref) &&
                    (!publ->node || (publ->node == node)))
                        goto found;
        }
        return NULL;

found:
        /* Remove publication from zone scope list */
        list_del(&publ->zone_list);
        info->zone_list_size--;

        /* Remove publication from cluster scope list, if present */
        if (in_own_cluster(net, node)) {
                list_del(&publ->cluster_list);
                info->cluster_list_size--;
        }

        /* Remove publication from node scope list, if present */
        if (in_own_node(net, node)) {
                list_del(&publ->node_list);
                info->node_list_size--;
        }

        /* Contract subseq list if no more publications for that subseq */
        if (list_empty(&info->zone_list)) {
                kfree(info);
                free = &nseq->sseqs[nseq->first_free--];
                memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
                removed_subseq = 1;
        }

        /* Notify any waiting subscriptions */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
                tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
                                            TIPC_WITHDRAWN, publ->ref,
                                            publ->node, removed_subseq);
        }

        return publ;
}
/**
 * tipc_nameseq_subscribe - attach a subscription, and issue
 * the prescribed number of events if there is any sub-sequence
 * overlapping with the requested sequence
 */
static void tipc_nameseq_subscribe(struct name_seq *nseq,
                                   struct tipc_subscription *s)
{
        struct sub_seq *sseq = nseq->sseqs;
        struct tipc_name_seq ns;

        tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);

        tipc_subscrp_get(s);
        list_add(&s->nameseq_list, &nseq->subscriptions);

        if (!sseq)
                return;

        while (sseq != &nseq->sseqs[nseq->first_free]) {
                if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
                        struct publication *crs;
                        struct name_info *info = sseq->info;
                        int must_report = 1;

                        list_for_each_entry(crs, &info->zone_list, zone_list) {
                                tipc_subscrp_report_overlap(s, sseq->lower,
                                                            sseq->upper,
                                                            TIPC_PUBLISHED,
                                                            crs->ref, crs->node,
                                                            must_report);
                                must_report = 0;
                        }
                }
                sseq++;
        }
}
static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *ns;

        seq_head = &tn->nametbl->seq_hlist[hash(type)];
        hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
                if (ns->type == type)
                        return ns;
        }

        return NULL;
}
struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
                                             u32 lower, u32 upper, u32 scope,
                                             u32 node, u32 port, u32 key)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
        struct name_seq *seq = nametbl_find_seq(net, type);
        int index = hash(type);

        if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
            (lower > upper)) {
                pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
                         type, lower, upper, scope);
                return NULL;
        }

        if (!seq)
                seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (!seq)
                return NULL;

        spin_lock_bh(&seq->lock);
        publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
                                        scope, node, port, key);
        spin_unlock_bh(&seq->lock);
        return publ;
}
struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
                                             u32 lower, u32 node, u32 ref,
                                             u32 key)
{
        struct publication *publ;
        struct name_seq *seq = nametbl_find_seq(net, type);

        if (!seq)
                return NULL;

        spin_lock_bh(&seq->lock);
        publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
        if (!seq->first_free && list_empty(&seq->subscriptions)) {
                hlist_del_init_rcu(&seq->ns_list);
                kfree(seq->sseqs);
                spin_unlock_bh(&seq->lock);
                kfree_rcu(seq, rcu);
                return publ;
        }
        spin_unlock_bh(&seq->lock);
        return publ;
}
/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node/cluster/zone,
 *   leaves 'destnode' unchanged (will be non-zero) and returns 0
 * - if name translation is attempted and succeeds, sets 'destnode'
 *   to publishing node and returns port reference (will be non-zero)
 * - if name translation is attempted and fails, sets 'destnode' to 0
 *   and returns 0
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
                           u32 *destnode)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sub_seq *sseq;
        struct name_info *info;
        struct publication *publ;
        struct name_seq *seq;
        u32 ref = 0;
        u32 node = 0;

        if (!tipc_in_scope(*destnode, tn->own_addr))
                return 0;

        rcu_read_lock();
        seq = nametbl_find_seq(net, type);
        if (unlikely(!seq))
                goto not_found;
        spin_lock_bh(&seq->lock);
        sseq = nameseq_find_subseq(seq, instance);
        if (unlikely(!sseq))
                goto no_match;
        info = sseq->info;

        /* Closest-First Algorithm */
        if (likely(!*destnode)) {
                if (!list_empty(&info->node_list)) {
                        publ = list_first_entry(&info->node_list,
                                                struct publication,
                                                node_list);
                        list_move_tail(&publ->node_list,
                                       &info->node_list);
                } else if (!list_empty(&info->cluster_list)) {
                        publ = list_first_entry(&info->cluster_list,
                                                struct publication,
                                                cluster_list);
                        list_move_tail(&publ->cluster_list,
                                       &info->cluster_list);
                } else {
                        publ = list_first_entry(&info->zone_list,
                                                struct publication,
                                                zone_list);
                        list_move_tail(&publ->zone_list,
                                       &info->zone_list);
                }
        }
        /* Round-Robin Algorithm */
        else if (*destnode == tn->own_addr) {
                if (list_empty(&info->node_list))
                        goto no_match;
                publ = list_first_entry(&info->node_list, struct publication,
                                        node_list);
                list_move_tail(&publ->node_list, &info->node_list);
        } else if (in_own_cluster_exact(net, *destnode)) {
                if (list_empty(&info->cluster_list))
                        goto no_match;
                publ = list_first_entry(&info->cluster_list, struct publication,
                                        cluster_list);
                list_move_tail(&publ->cluster_list, &info->cluster_list);
        } else {
                publ = list_first_entry(&info->zone_list, struct publication,
                                        zone_list);
                list_move_tail(&publ->zone_list, &info->zone_list);
        }

        ref = publ->ref;
        node = publ->node;
no_match:
        spin_unlock_bh(&seq->lock);
not_found:
        rcu_read_unlock();
        *destnode = node;
        return ref;
}
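/*
 * Illustrative sketch (not part of the original file): the 'destnode'
 * in/out convention documented above, as seen by a caller.  The type and
 * instance values are arbitrary placeholders:
 *
 *      u32 dnode = domain;     // 0 = closest-first, or a lookup domain
 *      u32 dport = tipc_nametbl_translate(net, 1000, 42, &dnode);
 *
 *      if (dport)
 *              ;       // found: dnode now holds the publishing node
 *      else if (dnode)
 *              ;       // deferred: destination lies outside this node's scope
 *      else
 *              ;       // failed: no matching publication was found
 */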
/**
 * tipc_nametbl_mc_translate - find multicast destinations
 *
 * Creates list of all local ports that overlap the given multicast address;
 * also determines if any off-node ports overlap.
 *
 * Note: Publications with a scope narrower than 'limit' are ignored.
 * (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought them here)
 *
 * Returns non-zero if any off-node ports overlap
 */
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
                              u32 limit, struct list_head *dports)
{
        struct name_seq *seq;
        struct sub_seq *sseq;
        struct sub_seq *sseq_stop;
        struct name_info *info;
        int res = 0;

        rcu_read_lock();
        seq = nametbl_find_seq(net, type);
        if (!seq)
                goto exit;

        spin_lock_bh(&seq->lock);
        sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
        sseq_stop = seq->sseqs + seq->first_free;
        for (; sseq != sseq_stop; sseq++) {
                struct publication *publ;

                if (sseq->lower > upper)
                        break;

                info = sseq->info;
                list_for_each_entry(publ, &info->node_list, node_list) {
                        if (publ->scope <= limit)
                                u32_push(dports, publ->ref);
                }

                if (info->cluster_list_size != info->node_list_size)
                        res = 1;
        }
        spin_unlock_bh(&seq->lock);
exit:
        rcu_read_unlock();
        return res;
}
/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
 * - Creates list of nodes that overlap the given multicast address
 * - Determines if any node local ports overlap
 */
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
                                   u32 upper, u32 domain,
                                   struct tipc_nlist *nodes)
{
        struct sub_seq *sseq, *stop;
        struct publication *publ;
        struct name_info *info;
        struct name_seq *seq;

        rcu_read_lock();
        seq = nametbl_find_seq(net, type);
        if (!seq)
                goto exit;

        spin_lock_bh(&seq->lock);
        sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
        stop = seq->sseqs + seq->first_free;
        for (; sseq->lower <= upper && sseq != stop; sseq++) {
                info = sseq->info;
                list_for_each_entry(publ, &info->zone_list, zone_list) {
                        if (tipc_in_scope(domain, publ->node))
                                tipc_nlist_add(nodes, publ->node);
                }
        }
        spin_unlock_bh(&seq->lock);
exit:
        rcu_read_unlock();
}
/**
 * tipc_nametbl_publish - add name publication to network name tables
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
                                         u32 upper, u32 scope, u32 port_ref,
                                         u32 key)
{
        struct publication *publ;
        struct sk_buff *buf = NULL;
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        spin_lock_bh(&tn->nametbl_lock);
        if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
                        TIPC_MAX_PUBLICATIONS);
                spin_unlock_bh(&tn->nametbl_lock);
                return NULL;
        }

        publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
                                        tn->own_addr, port_ref, key);
        if (likely(publ)) {
                tn->nametbl->local_publ_count++;
                buf = tipc_named_publish(net, publ);
                /* Any pending external events? */
                tipc_named_process_backlog(net);
        }
        spin_unlock_bh(&tn->nametbl_lock);

        if (buf)
                tipc_node_broadcast(net, buf);
        return publ;
}
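/*
 * Illustrative sketch (not part of the original file): a minimal
 * publish/translate/withdraw round trip using the external API of this
 * file.  Type 1000, range 100..199, port reference 123 and key 123 are
 * arbitrary placeholder values:
 *
 *      struct publication *p;
 *      u32 dnode = 0;
 *
 *      p = tipc_nametbl_publish(net, 1000, 100, 199, TIPC_CLUSTER_SCOPE,
 *                               123, 123);
 *      if (p) {
 *              // any instance in 100..199 now resolves to port 123 locally
 *              tipc_nametbl_translate(net, 1000, 150, &dnode);
 *              tipc_nametbl_withdraw(net, 1000, 100, 123, 123);
 *      }
 */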
/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
                          u32 key)
{
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        spin_lock_bh(&tn->nametbl_lock);
        publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
                                        ref, key);
        if (likely(publ)) {
                tn->nametbl->local_publ_count--;
                skb = tipc_named_withdraw(net, publ);
                /* Any pending external events? */
                tipc_named_process_backlog(net);
                list_del_init(&publ->pport_list);
                kfree_rcu(publ, rcu);
        } else {
                pr_err("Unable to remove local publication\n"
                       "(type=%u, lower=%u, ref=%u, key=%u)\n",
                       type, lower, ref, key);
        }
        spin_unlock_bh(&tn->nametbl_lock);

        if (skb) {
                tipc_node_broadcast(net, skb);
                return 1;
        }
        return 0;
}
/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
        int index = hash(type);
        struct name_seq *seq;
        struct tipc_name_seq ns;

        spin_lock_bh(&tn->nametbl_lock);
        seq = nametbl_find_seq(s->net, type);
        if (!seq)
                seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (seq) {
                spin_lock_bh(&seq->lock);
                tipc_nameseq_subscribe(seq, s);
                spin_unlock_bh(&seq->lock);
        } else {
                tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
                pr_warn("Failed to create subscription for {%u,%u,%u}\n",
                        ns.type, ns.lower, ns.upper);
        }
        spin_unlock_bh(&tn->nametbl_lock);
}
/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        struct name_seq *seq;
        u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);

        spin_lock_bh(&tn->nametbl_lock);
        seq = nametbl_find_seq(s->net, type);
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
                tipc_subscrp_put(s);
                if (!seq->first_free && list_empty(&seq->subscriptions)) {
                        hlist_del_init_rcu(&seq->ns_list);
                        kfree(seq->sseqs);
                        spin_unlock_bh(&seq->lock);
                        kfree_rcu(seq, rcu);
                } else {
                        spin_unlock_bh(&seq->lock);
                }
        }
        spin_unlock_bh(&tn->nametbl_lock);
}
int tipc_nametbl_init(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct name_table *tipc_nametbl;
        int i;

        tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
        if (!tipc_nametbl)
                return -ENOMEM;

        for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
                INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);

        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
        tn->nametbl = tipc_nametbl;
        spin_lock_init(&tn->nametbl_lock);
        return 0;
}
/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tipc_nametbl_lock must be held when calling this function
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
        struct publication *publ, *safe;
        struct sub_seq *sseq;
        struct name_info *info;

        spin_lock_bh(&seq->lock);
        sseq = seq->sseqs;
        info = sseq->info;
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
                tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
                                         publ->ref, publ->key);
                kfree_rcu(publ, rcu);
        }
        hlist_del_init_rcu(&seq->ns_list);
        kfree(seq->sseqs);
        spin_unlock_bh(&seq->lock);

        kfree_rcu(seq, rcu);
}
void tipc_nametbl_stop(struct net *net)
{
        u32 i;
        struct name_seq *seq;
        struct hlist_head *seq_head;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct name_table *tipc_nametbl = tn->nametbl;

        /* Verify name table is empty and purge any lingering
         * publications, then release the name table
         */
        spin_lock_bh(&tn->nametbl_lock);
        for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
                        continue;
                seq_head = &tipc_nametbl->seq_hlist[i];
                hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
                        tipc_purge_publications(net, seq);
                }
        }
        spin_unlock_bh(&tn->nametbl_lock);

        synchronize_net();
        kfree(tipc_nametbl);
}
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
                                        struct name_seq *seq,
                                        struct sub_seq *sseq, u32 *last_publ)
{
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *publ;
        struct publication *p;

        if (*last_publ) {
                list_for_each_entry(p, &sseq->info->zone_list, zone_list)
                        if (p->key == *last_publ)
                                break;
                if (p->key != *last_publ)
                        return -EPIPE;
        } else {
                p = list_first_entry(&sseq->info->zone_list, struct publication,
                                     zone_list);
        }

        list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
                *last_publ = p->key;

                hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
                                  &tipc_genl_family, NLM_F_MULTI,
                                  TIPC_NL_NAME_TABLE_GET);
                if (!hdr)
                        return -EMSGSIZE;

                attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
                if (!attrs)
                        goto msg_full;

                publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
                if (!publ)
                        goto attr_msg_full;

                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
                        goto publ_msg_full;
                if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
                        goto publ_msg_full;

                nla_nest_end(msg->skb, publ);
                nla_nest_end(msg->skb, attrs);
                genlmsg_end(msg->skb, hdr);
        }
        *last_publ = 0;

        return 0;

publ_msg_full:
        nla_nest_cancel(msg->skb, publ);
attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}
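/*
 * Illustrative note (not part of the original file): each publication is
 * emitted above as one NLM_F_MULTI message with the following nesting of
 * netlink attributes:
 *
 *      TIPC_NLA_NAME_TABLE
 *          TIPC_NLA_NAME_TABLE_PUBL
 *              TIPC_NLA_PUBL_TYPE / _LOWER / _UPPER / _SCOPE /
 *              TIPC_NLA_PUBL_NODE / _REF / _KEY        (all u32)
 */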
static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
                                 u32 *last_lower, u32 *last_publ)
{
        struct sub_seq *sseq;
        struct sub_seq *sseq_start;
        int err;

        if (*last_lower) {
                sseq_start = nameseq_find_subseq(seq, *last_lower);
                if (!sseq_start)
                        return -EPIPE;
        } else {
                sseq_start = seq->sseqs;
        }

        for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
                err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
                if (err) {
                        *last_lower = sseq->lower;
                        return err;
                }
        }
        *last_lower = 0;

        return 0;
}
static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
                            u32 *last_type, u32 *last_lower, u32 *last_publ)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *seq = NULL;
        int err;
        int i;

        if (*last_type)
                i = hash(*last_type);
        else
                i = 0;

        for (; i < TIPC_NAMETBL_SIZE; i++) {
                seq_head = &tn->nametbl->seq_hlist[i];

                if (*last_type) {
                        seq = nametbl_find_seq(net, *last_type);
                        if (!seq)
                                return -EPIPE;
                } else {
                        hlist_for_each_entry_rcu(seq, seq_head, ns_list)
                                break;
                        if (!seq)
                                continue;
                }

                hlist_for_each_entry_from_rcu(seq, ns_list) {
                        spin_lock_bh(&seq->lock);
                        err = __tipc_nl_subseq_list(msg, seq, last_lower,
                                                    last_publ);

                        if (err) {
                                *last_type = seq->type;
                                spin_unlock_bh(&seq->lock);
                                return err;
                        }
                        spin_unlock_bh(&seq->lock);
                }
                *last_type = 0;
        }
        return 0;
}
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int err;
        int done = cb->args[3];
        u32 last_type = cb->args[0];
        u32 last_lower = cb->args[1];
        u32 last_publ = cb->args[2];
        struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();
        err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
        if (!err) {
                done = 1;
        } else if (err != -EMSGSIZE) {
                /* We never set seq or call nl_dump_check_consistent(), so
                 * setting prev_seq here will cause the consistency check to
                 * fail in the netlink callback handler, resulting in the
                 * NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
                 * we got an error.
                 */
                cb->prev_seq = 1;
        }
        rcu_read_unlock();

        cb->args[0] = last_type;
        cb->args[1] = last_lower;
        cb->args[2] = last_publ;
        cb->args[3] = done;

        return skb->len;
}
bool u32_find(struct list_head *l, u32 value)
{
        struct u32_item *item;

        list_for_each_entry(item, l, list) {
                if (item->value == value)
                        return true;
        }
        return false;
}

bool u32_push(struct list_head *l, u32 value)
{
        struct u32_item *item;

        list_for_each_entry(item, l, list) {
                if (item->value == value)
                        return false;
        }
        item = kmalloc(sizeof(*item), GFP_ATOMIC);
        if (unlikely(!item))
                return false;

        item->value = value;
        list_add(&item->list, l);
        return true;
}

u32 u32_pop(struct list_head *l)
{
        struct u32_item *item;
        u32 value = 0;

        if (list_empty(l))
                return 0;
        item = list_first_entry(l, typeof(*item), list);
        value = item->value;
        list_del(&item->list);
        kfree(item);
        return value;
}

bool u32_del(struct list_head *l, u32 value)
{
        struct u32_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, l, list) {
                if (item->value != value)
                        continue;
                list_del(&item->list);
                kfree(item);
                return true;
        }
        return false;
}

void u32_list_purge(struct list_head *l)
{
        struct u32_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, l, list) {
                list_del(&item->list);
                kfree(item);
        }
}

int u32_list_len(struct list_head *l)
{
        struct u32_item *item;
        int i = 0;

        list_for_each_entry(item, l, list) {
                i++;
        }
        return i;
}
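/*
 * Illustrative sketch (not part of the original file): the u32_* helpers
 * above maintain a duplicate-free list of 32-bit values, as used for the
 * destination port list built by tipc_nametbl_mc_translate():
 *
 *      LIST_HEAD(dports);
 *
 *      u32_push(&dports, 100);         // true, value added
 *      u32_push(&dports, 100);         // false, already present
 *      u32_list_len(&dports);          // 1
 *      u32_pop(&dports);               // 100, list now empty
 *      u32_list_purge(&dports);        // frees any remaining entries
 */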