/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "node_subscr.h"
#include "name_distr.h"
#include "name_table.h"

#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

#define BCLINK_LOG_BUF_SIZE 0

/*
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
	struct bearer *primary;
	struct bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
	struct bearer bearer;
	struct media media;
	struct bcbearer_pair bpairs[MAX_BEARERS];
	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
	struct link link;
	struct tipc_node node;
};

static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
	return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

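/*
 * Note: the bcbuf_*() helpers above reuse the skb control block's 'handle'
 * field as a counter of cluster nodes that have not yet acknowledged a
 * queued broadcast packet; tipc_bclink_acknowledge() decrements it and
 * releases the buffer once the count reaches zero.
 */
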
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

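/*
 * The two routines above repurpose bcl->fsm_msg_cnt to hold the sequence
 * number of the last broadcast packet sent; the broadcast pseudo-link does
 * not appear to use that field for its normal link-FSM purpose.
 */
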
/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct tipc_node *n_ptr)
{
	struct sk_buff *buf = n_ptr->bclink.deferred_head;

	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
		mod(n_ptr->bclink.last_in);
	if (unlikely(buf != NULL))
		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 *       distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}

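/*
 * Example (assuming TIPC_MIN_LINK_WIN is 16, matching the "divide by 16"
 * note above): a node whose tipc_own_tag is 3 only responds when the
 * triggering count satisfies n % 16 == 3, so nodes with different tags
 * answer different sequence numbers and their ACKs/NACKs are spread out.
 */
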
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after)) {
		buf = buf->next;
	}
	tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	if (less_eq(acked, n_ptr->bclink.acked))
		return;

	spin_lock_bh(&bc_lock);

	/* Skip over packets that node has previously acknowledged */

	crs = bcl->first_out;
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
		crs = crs->next;
	}

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;
		bcbuf_decr_acks(crs);
		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
	spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

	if (l_ptr != NULL)
		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

		if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
			bcl->stats.sent_nacks++;
			buf_discard(buf);
		} else {
			tipc_bearer_schedule(bcl->b_ptr, bcl);
			bcl->proto_msg_queue = buf;
			bcl->stats.bearer_congs++;
		}

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at same time)
		 */

		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}

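/*
 * The NACK built above asks for retransmission of the range
 * (gap_after, gap_to]; this matches bclink_retransmit_pkt(), whose 'after'
 * argument is the last sequence number that does NOT need retransmitting.
 */
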
/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
	if (!n_ptr->bclink.supported ||
	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
		return;

	bclink_set_gap(n_ptr);
	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
		n_ptr->bclink.gap_to = last_sent;
	bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
	struct tipc_node *n_ptr = tipc_node_find(dest);
	u32 my_after, my_to;

	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
		return;
	tipc_node_lock(n_ptr);
	/*
	 * Modify gap to suppress unnecessary NACKs from this node
	 */
	my_after = n_ptr->bclink.gap_after;
	my_to = n_ptr->bclink.gap_to;

	if (less_eq(gap_after, my_after)) {
		if (less(my_after, gap_to) && less(gap_to, my_to))
			n_ptr->bclink.gap_after = gap_to;
		else if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
	} else if (less_eq(gap_after, my_to)) {
		if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = gap_after;
	} else {
		/*
		 * Expand gap if missing bufs not in deferred queue:
		 */
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		u32 prev = n_ptr->bclink.gap_to;

		for (; buf; buf = buf->next) {
			u32 seqno = buf_seqno(buf);

			if (mod(seqno - prev) != 1) {
				buf = NULL;
				break;
			}
			if (seqno == gap_after)
				break;
			prev = seqno;
		}
		if (buf == NULL)
			n_ptr->bclink.gap_to = gap_after;
	}
	/*
	 * Some nodes may send a complementary NACK now:
	 */
	if (bclink_ack_allowed(sender_tag + 1)) {
		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
			bclink_send_nack(n_ptr);
			bclink_set_gap(n_ptr);
		}
	}
	tipc_node_unlock(n_ptr);
}

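/*
 * NACK suppression: a node that overhears a NACK addressed to another node
 * trims its own recorded gap so it does not re-request the same packets,
 * and (if its tag is next in line) may immediately send a complementary
 * NACK covering whatever part of its gap the overheard NACK did not request.
 */
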
/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	res = tipc_link_send_buf(bcl, buf);
	if (unlikely(res == -ELINKCONG))
		buf_discard(buf);
	else
		bclink_set_last_sent();

	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
		bcl->stats.max_queue_sz = bcl->out_queue_size;
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += bcl->out_queue_size;

	spin_unlock_bh(&bc_lock);
	return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
	static int rx_count = 0;
#endif
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	msg_dbg(msg, "<BC<<<");

	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
		     (msg_mc_netid(msg) != tipc_net_id))) {
		buf_discard(buf);
		return;
	}

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		msg_dbg(msg, "<BCNACK<<<");
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_node_lock(node);
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bcl->owner->next = node;   /* remember requestor */
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			bcl->owner->next = NULL;
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		buf_discard(buf);
		return;
	}

#if (TIPC_BCAST_LOSS_RATE)
	if (++rx_count == TIPC_BCAST_LOSS_RATE) {
		rx_count = 0;
		buf_discard(buf);
		return;
	}
#endif

	tipc_node_lock(node);
receive:
	deferred = node->bclink.deferred_head;
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			tipc_port_recv_mcast(buf, NULL);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else {
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		}
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			tipc_node_lock(node);
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
		return;
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
		buf_discard(buf);
	}
	tipc_node_unlock(node);
}

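/*
 * Receive path summary for tipc_bclink_recv_pkt(): an in-sequence packet is
 * delivered upward and, if the head of the deferred queue now carries the
 * next expected number, that buffer is pulled off and processed in the same
 * call; a packet arriving ahead of sequence is deferred and widens the
 * recorded gap (possibly triggering a NACK); anything older is counted and
 * discarded as a duplicate.
 */
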
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.supported &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare buffer for broadcasting (if first time trying to send it) */

	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		assert(tipc_cltr_bcast_nodes.count != 0);
		bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;
	}

	/* Send buffer over bearers until all targets reached */

	bcbearer->remains = tipc_cltr_bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct bearer *p = bcbearer->bpairs[bp_index].primary;
		struct bearer *s = bcbearer->bpairs[bp_index].secondary;

		if (!p)
			break;	/* no more bearers to try */

		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue;	/* bearer pair doesn't add anything */

		if (p->publ.blocked ||
		    p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
			/* unable to send on primary bearer */
			if (!s || s->publ.blocked ||
			    s->media->send_msg(buf, &s->publ,
					       &s->media->bcast_addr)) {
				/* unable to send on either bearer */
				continue;
			}
		}

		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			return 0;

		bcbearer->remains = bcbearer->remains_new;
	}

	/*
	 * Unable to reach all targets (indicate success, since currently
	 * there isn't code in place to properly block & unblock the
	 * pseudo-bearer used by the broadcast link)
	 */

	return 0;
}

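/*
 * The 'remains'/'remains_new' node maps track which destinations are still
 * unreached; each bearer pair that covers additional nodes is tried in turn,
 * and the send loop above ends early once remains_new.count drops to zero.
 */
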
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */

	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */

	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}

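/*
 * Pairing rule used above: two active bearers at the same priority share a
 * single bcbearer_pair entry only when they reach an identical node set;
 * otherwise the second bearer is given its own entry so that every
 * destination remains covered.
 */
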
/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
	struct bearer *b_ptr;

	spin_lock_bh(&bc_lock);
	b_ptr = &bcbearer->bearer;
	if (b_ptr->publ.blocked) {
		b_ptr->publ.blocked = 0;
		tipc_bearer_lock_push(b_ptr);
	}
	spin_unlock_bh(&bc_lock);
}

int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	struct print_buf pb;

	if (!bcl)
		return 0;

	tipc_printbuf_init(&pb, buf, buf_size);

	spin_lock_bh(&bc_lock);

	tipc_printf(&pb, "Link <%s>\n"
			 "  Window:%u packets\n",
		    bcl->name, bcl->queue_limit[0]);
	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.recv_info,
		    bcl->stats.recv_fragments,
		    bcl->stats.recv_fragmented,
		    bcl->stats.recv_bundles,
		    bcl->stats.recv_bundled);
	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.sent_info,
		    bcl->stats.sent_fragments,
		    bcl->stats.sent_fragmented,
		    bcl->stats.sent_bundles,
		    bcl->stats.sent_bundled);
	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
		    bcl->stats.recv_nacks,
		    bcl->stats.deferred_recv,
		    bcl->stats.duplicates);
	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
		    bcl->stats.sent_nacks,
		    bcl->stats.sent_acks,
		    bcl->stats.retransmitted);
	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
		    bcl->stats.bearer_congs,
		    bcl->stats.link_congs,
		    bcl->stats.max_queue_sz,
		    bcl->stats.queue_sz_counts
		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
		    : 0);

	spin_unlock_bh(&bc_lock);
	return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_init(void)
{
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bcbearer || !bclink) {
 nomem:
		warn("Multicast link creation failed, no memory\n");
		kfree(bcbearer);
		bcbearer = NULL;
		kfree(bclink);
		bclink = NULL;
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-multicast");

	bcl = &bclink->link;
	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);

	if (BCLINK_LOG_BUF_SIZE) {
		char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);

		if (!pb)
			goto nomem;
		tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
	}

	return 0;
}

void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	if (bcl) {
		tipc_link_stop(bcl);
		if (BCLINK_LOG_BUF_SIZE)
			kfree(bcl->print_buf.buf);
		bcl = NULL;
		kfree(bclink);
		bclink = NULL;
		kfree(bcbearer);
		bcbearer = NULL;
	}
	spin_unlock_bh(&bc_lock);
}

/**
 * tipc_nmap_add - add a node to a node map
 */

void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

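/*
 * Worked example for the two helpers above (assuming WSIZE is the number of
 * bits in one map word, e.g. 32): node number n = 37 lands in word
 * w = 37 / 32 = 1 with mask = 1 << (37 % 32) = 1 << 5, so bit 5 of map[1]
 * records that node, while 'count' tracks how many bits are set in the map.
 */
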
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
	struct port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

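/*
 * A port_list block holds up to PLSIZE port ids; once the current block is
 * full a new block is chained via 'next', and pl_ptr->count always holds the
 * total number of ports stored across the whole chain.
 */
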
/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */

void tipc_port_list_free(struct port_list *pl_ptr)
{
	struct port_list *item;
	struct port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}