/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"
#include "name_distr.h"
#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};
/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};
/**
 * struct tipc_bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bclink {
	struct tipc_link link;
	struct tipc_node node;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};
static struct tipc_bcbearer bcast_bearer;
static struct tipc_bclink bcast_link;

static struct tipc_bcbearer *bcbearer = &bcast_bearer;
static struct tipc_bclink *bclink = &bcast_link;
static struct tipc_link *bcl = &bcast_link.link;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
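
/*
 * The bcbuf helpers below reuse the skb control block's "handle" field
 * as a counter of nodes that have yet to acknowledge a queued broadcast
 * buffer; a buffer can be released once this count drops to zero.
 */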
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}
static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
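
/**
 * tipc_bclink_add_node - add a node to the map of broadcast-capable nodes
 */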
void tipc_bclink_add_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_add(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}
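
/**
 * tipc_bclink_remove_node - remove a node from the map of broadcast-capable nodes
 */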
void tipc_bclink_remove_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}
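
/*
 * Note: the broadcast link reuses bcl->fsm_msg_cnt to track the sequence
 * number of the last packet it sent; tipc_bclink_get_last_sent() reads
 * this value back.
 */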
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}
/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */
static void bclink_set_gap(struct tipc_node *n_ptr)
{
	struct sk_buff *buf = n_ptr->bclink.deferred_head;

	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
		mod(n_ptr->bclink.last_in);
	if (unlikely(buf != NULL))
		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}
/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in the network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */
static int bclink_ack_allowed(u32 n)
{
	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
	return bclink->retransmit_to;
}
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after))
		buf = buf->next;
	tipc_link_retransmit(bcl, buf, mod(to - after));
}
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	spin_lock_bh(&bc_lock);

	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(crs)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;

		if (crs != bcl->next_out)
			bcbuf_decr_acks(crs);
		else {
			bcbuf_set_acks(crs, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
exit:
	spin_unlock_bh(&bc_lock);
}
/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */
static void bclink_send_ack(struct tipc_node *n_ptr)
{
	struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

	if (l_ptr != NULL)
		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
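
/*
 * Note: the broadcast ACK rides on a unicast link protocol (STATE) message,
 * which carries this node's broadcast ack state; the "addr & 1" index above
 * spreads these messages across the node's two active links when both are up.
 */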
/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */
static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		buf_discard(buf);

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at same time)
		 */
		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}
/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */
void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
	if (!n_ptr->bclink.supported ||
	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
		return;

	bclink_set_gap(n_ptr);
	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
		n_ptr->bclink.gap_to = last_sent;
	bclink_send_nack(n_ptr);
}
/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */
static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
	struct tipc_node *n_ptr = tipc_node_find(dest);
	u32 my_after, my_to;

	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
		return;
	tipc_node_lock(n_ptr);
	/*
	 * Modify gap to suppress unnecessary NACKs from this node
	 */
	my_after = n_ptr->bclink.gap_after;
	my_to = n_ptr->bclink.gap_to;

	if (less_eq(gap_after, my_after)) {
		if (less(my_after, gap_to) && less(gap_to, my_to))
			n_ptr->bclink.gap_after = gap_to;
		else if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
	} else if (less_eq(gap_after, my_to)) {
		if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = gap_after;
	} else {
		/*
		 * Expand gap if missing bufs not in deferred queue:
		 */
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		u32 prev = n_ptr->bclink.gap_to;

		for (; buf; buf = buf->next) {
			u32 seqno = buf_seqno(buf);

			if (mod(seqno - prev) != 1) {
				buf = NULL;
				break;
			}
			if (seqno == gap_after)
				break;
			prev = seqno;
		}
		if (buf == NULL)
			n_ptr->bclink.gap_to = gap_after;
	}
	/*
	 * Some nodes may send a complementary NACK now:
	 */
	if (bclink_ack_allowed(sender_tag + 1)) {
		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
			bclink_send_nack(n_ptr);
			bclink_set_gap(n_ptr);
		}
	}
	tipc_node_unlock(n_ptr);
}
/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
		buf_discard(buf);
		goto exit;
	}

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res >= 0)) {
		bclink_set_last_sent();
		bcl->stats.queue_sz_counts++;
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
exit:
	spin_unlock_bh(&bc_lock);
	return res;
}
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.supported))
		goto unlock;

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

receive:
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				buf_discard(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			tipc_node_unlock(node);
			buf_discard(buf);
		}
		buf = NULL;
		tipc_node_lock(node);
		deferred = node->bclink.deferred_head;
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		buf = NULL;
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
	}
unlock:
	tipc_node_unlock(node);
exit:
	buf_discard(buf);
}
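
/**
 * tipc_bclink_acks_missing - test if a node still has unacknowledged broadcast packets
 */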
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.supported &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/*
	 * Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */

	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

		if (!p)
			break;	/* no more bearers to try */

		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue;	/* bearer pair doesn't add anything */

		if (p->blocked ||
		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
			/* unable to send on primary bearer */
			if (!s || s->blocked ||
			    s->media->send_msg(buf, s,
					       &s->media->bcast_addr)) {
				/* unable to send on either bearer */
				continue;
			}
		}

		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break;	/* all targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(void)
{
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */

	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct tipc_bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */

	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}
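
/**
 * tipc_bclink_stats - dump broadcast link statistics into a print buffer
 */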
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	struct print_buf pb;

	if (!bcl)
		return 0;

	tipc_printbuf_init(&pb, buf, buf_size);

	spin_lock_bh(&bc_lock);

	tipc_printf(&pb, "Link <%s>\n"
			 "  Window:%u packets\n",
		    bcl->name, bcl->queue_limit[0]);
	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.recv_info,
		    bcl->stats.recv_fragments,
		    bcl->stats.recv_fragmented,
		    bcl->stats.recv_bundles,
		    bcl->stats.recv_bundled);
	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.sent_info,
		    bcl->stats.sent_fragments,
		    bcl->stats.sent_fragmented,
		    bcl->stats.sent_bundles,
		    bcl->stats.sent_bundled);
	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
		    bcl->stats.recv_nacks,
		    bcl->stats.deferred_recv,
		    bcl->stats.duplicates);
	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
		    bcl->stats.sent_nacks,
		    bcl->stats.sent_acks,
		    bcl->stats.retransmitted);
	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
		    bcl->stats.bearer_congs,
		    bcl->stats.link_congs,
		    bcl->stats.max_queue_sz,
		    bcl->stats.queue_sz_counts
		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
		    : 0);

	spin_unlock_bh(&bc_lock);
	return tipc_printbuf_validate(&pb);
}
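
/**
 * tipc_bclink_reset_stats - clear broadcast link statistics counters
 */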
int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}
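
/**
 * tipc_bclink_set_queue_limits - set the broadcast link window size
 */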
int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}
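
/**
 * tipc_bclink_init - set up the broadcast pseudo-bearer and broadcast link
 */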
void tipc_bclink_init(void)
{
	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
}
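
/**
 * tipc_bclink_stop - shut down the broadcast link and reset its state
 */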
void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	tipc_link_stop(bcl);
	spin_unlock_bh(&bc_lock);

	memset(bclink, 0, sizeof(*bclink));
	memset(bcbearer, 0, sizeof(*bcbearer));
}
/**
 * tipc_nmap_add - add a node to a node map
 */
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}
/**
 * tipc_nmap_remove - remove a node from a node map
 */
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}
/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
	struct tipc_port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}
/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
	struct tipc_port_list *item;
	struct tipc_port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}