tipc: let group member stay in JOINED mode if unable to reclaim
[linux-2.6/btrfs-unstable.git] / net / tipc / group.c
blob652fa66a87f67155cf881a7e20647ea69e35cec3
1 /*
2 * net/tipc/group.c: TIPC group messaging code
4 * Copyright (c) 2017, Ericsson AB
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
36 #include "core.h"
37 #include "addr.h"
38 #include "group.h"
39 #include "bcast.h"
40 #include "server.h"
41 #include "msg.h"
42 #include "socket.h"
43 #include "node.h"
44 #include "name_table.h"
45 #include "subscr.h"
/* One advertisement unit covers one maximum-size message including header,
 * expressed in flow control blocks
 */
#define ADV_UNIT (((MAX_MSG_SIZE + MAX_H_SIZE) / FLOWCTL_BLK_SZ) + 1)
#define ADV_IDLE ADV_UNIT		/* window granted to non-active members */
#define ADV_ACTIVE (ADV_UNIT * 12)	/* window granted to active members */
/* Group member state machine. Order matters: tipc_group_is_sender() treats
 * MBR_JOINED and every later state as "fully joined".
 */
enum mbr_state {
	MBR_QUARANTINED,	/* not trusted; neither sender nor receiver */
	MBR_DISCOVERED,		/* seen in topology event, no JOIN msg yet */
	MBR_JOINING,		/* JOIN received, PUBLISH event still pending */
	MBR_PUBLISHED,		/* PUBLISH event held back, JOIN still pending */
	MBR_JOINED,		/* full member at idle advertisement level */
	MBR_PENDING,		/* waiting for a slot in the active set */
	MBR_ACTIVE,		/* granted full (active) advertisement level */
	MBR_RECLAIMING,		/* asked to remit unused advertisement */
	MBR_REMITTED,		/* remit received, draining in-flight msgs */
	MBR_LEAVING		/* member on its way out of the group */
};
/**
 * struct tipc_member - per-remote-member state kept by a group
 * @tree_node: linkage in group's member rbtree, keyed on node:port
 * @list: linkage in group's active or pending list
 * @small_win: linkage in group's small-window list, sorted by @window
 * @event_msg: held-back membership event awaiting matching protocol msg
 * @deferredq: out-of-sequence broadcasts deferred until they fit in sequence
 * @group: back pointer to owning group
 * @node: member's node address
 * @port: member's port number
 * @instance: member's published name instance
 * @state: current position in the mbr_state machine
 * @advertised: window we have advertised to this member, in blocks
 * @window: window this member has advertised to us, in blocks
 * @bc_rcv_nxt: next broadcast sequence number expected from this member
 * @bc_syncpt: broadcast sync point received in JOIN/LEAVE message
 * @bc_acked: last of our broadcasts acknowledged by this member
 * @usr_pending: owning socket is blocked waiting on this member's window
 */
struct tipc_member {
	struct rb_node tree_node;
	struct list_head list;
	struct list_head small_win;
	struct sk_buff *event_msg;
	struct sk_buff_head deferredq;
	struct tipc_group *group;
	u32 node;
	u32 port;
	u32 instance;
	enum mbr_state state;
	u16 advertised;
	u16 window;
	u16 bc_rcv_nxt;
	u16 bc_syncpt;
	u16 bc_acked;
	bool usr_pending;
};
/**
 * struct tipc_group - state of one communication group, owned by one socket
 * @members: rbtree of all known members, keyed on node:port
 * @small_win: members with window below ADV_IDLE, sorted ascending by window
 * @pending: members waiting to be promoted to the active set
 * @active: members currently granted the active advertisement level
 * @dests: list of destination nodes with at least one member
 * @net: owning network namespace
 * @subid: topology service subscription id, released at group delete
 * @type: group (name table) type
 * @instance: own published name instance
 * @domain: lookup domain derived from @scope
 * @scope: publication scope requested at join
 * @portid: owning socket's port
 * @member_cnt: number of members in @members
 * @active_cnt: number of members counted as active
 * @max_active: cap on @active_cnt, set by tipc_group_rcvbuf_limit()
 * @bc_snd_nxt: next broadcast sequence number to send
 * @bc_ackers: number of members we still expect a broadcast ack from
 * @loopback: also deliver own messages to self
 * @events: deliver membership events to user
 */
struct tipc_group {
	struct rb_root members;
	struct list_head small_win;
	struct list_head pending;
	struct list_head active;
	struct tipc_nlist dests;
	struct net *net;
	int subid;
	u32 type;
	u32 instance;
	u32 domain;
	u32 scope;
	u32 portid;
	u16 member_cnt;
	u16 active_cnt;
	u16 max_active;
	u16 bc_snd_nxt;
	u16 bc_ackers;
	bool loopback;
	bool events;
};
/* Forward declaration: used by several functions above its definition */
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
				  int mtyp, struct sk_buff_head *xmitq);
108 static void tipc_group_decr_active(struct tipc_group *grp,
109 struct tipc_member *m)
111 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
112 m->state == MBR_REMITTED)
113 grp->active_cnt--;
116 static int tipc_group_rcvbuf_limit(struct tipc_group *grp)
118 int max_active, active_pool, idle_pool;
119 int mcnt = grp->member_cnt + 1;
121 /* Limit simultaneous reception from other members */
122 max_active = min(mcnt / 8, 64);
123 max_active = max(max_active, 16);
124 grp->max_active = max_active;
126 /* Reserve blocks for active and idle members */
127 active_pool = max_active * ADV_ACTIVE;
128 idle_pool = (mcnt - max_active) * ADV_IDLE;
130 /* Scale to bytes, considering worst-case truesize/msgsize ratio */
131 return (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4;
/* tipc_group_bc_snd_nxt() - next broadcast sequence number this group sends */
u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
{
	return grp->bc_snd_nxt;
}
139 static bool tipc_group_is_receiver(struct tipc_member *m)
141 return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
144 static bool tipc_group_is_sender(struct tipc_member *m)
146 return m && m->state >= MBR_JOINED;
149 u32 tipc_group_exclude(struct tipc_group *grp)
151 if (!grp->loopback)
152 return grp->portid;
153 return 0;
/* tipc_group_size() - current number of known members in the group */
int tipc_group_size(struct tipc_group *grp)
{
	return grp->member_cnt;
}
161 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
162 struct tipc_group_req *mreq)
164 struct tipc_group *grp;
165 u32 type = mreq->type;
167 grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
168 if (!grp)
169 return NULL;
170 tipc_nlist_init(&grp->dests, tipc_own_addr(net));
171 INIT_LIST_HEAD(&grp->small_win);
172 INIT_LIST_HEAD(&grp->active);
173 INIT_LIST_HEAD(&grp->pending);
174 grp->members = RB_ROOT;
175 grp->net = net;
176 grp->portid = portid;
177 grp->domain = addr_domain(net, mreq->scope);
178 grp->type = type;
179 grp->instance = mreq->instance;
180 grp->scope = mreq->scope;
181 grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
182 grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
183 if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, &grp->subid))
184 return grp;
185 kfree(grp);
186 return NULL;
189 void tipc_group_delete(struct net *net, struct tipc_group *grp)
191 struct rb_root *tree = &grp->members;
192 struct tipc_member *m, *tmp;
193 struct sk_buff_head xmitq;
195 __skb_queue_head_init(&xmitq);
197 rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
198 tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
199 list_del(&m->list);
200 kfree(m);
202 tipc_node_distr_xmit(net, &xmitq);
203 tipc_nlist_purge(&grp->dests);
204 tipc_topsrv_kern_unsubscr(net, grp->subid);
205 kfree(grp);
208 struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
209 u32 node, u32 port)
211 struct rb_node *n = grp->members.rb_node;
212 u64 nkey, key = (u64)node << 32 | port;
213 struct tipc_member *m;
215 while (n) {
216 m = container_of(n, struct tipc_member, tree_node);
217 nkey = (u64)m->node << 32 | m->port;
218 if (key < nkey)
219 n = n->rb_left;
220 else if (key > nkey)
221 n = n->rb_right;
222 else
223 return m;
225 return NULL;
228 static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
229 u32 node, u32 port)
231 struct tipc_member *m;
233 m = tipc_group_find_member(grp, node, port);
234 if (m && tipc_group_is_receiver(m))
235 return m;
236 return NULL;
239 static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
240 u32 node)
242 struct tipc_member *m;
243 struct rb_node *n;
245 for (n = rb_first(&grp->members); n; n = rb_next(n)) {
246 m = container_of(n, struct tipc_member, tree_node);
247 if (m->node == node)
248 return m;
250 return NULL;
253 static void tipc_group_add_to_tree(struct tipc_group *grp,
254 struct tipc_member *m)
256 u64 nkey, key = (u64)m->node << 32 | m->port;
257 struct rb_node **n, *parent = NULL;
258 struct tipc_member *tmp;
260 n = &grp->members.rb_node;
261 while (*n) {
262 tmp = container_of(*n, struct tipc_member, tree_node);
263 parent = *n;
264 tmp = container_of(parent, struct tipc_member, tree_node);
265 nkey = (u64)tmp->node << 32 | tmp->port;
266 if (key < nkey)
267 n = &(*n)->rb_left;
268 else if (key > nkey)
269 n = &(*n)->rb_right;
270 else
271 return;
273 rb_link_node(&m->tree_node, parent, n);
274 rb_insert_color(&m->tree_node, &grp->members);
/* tipc_group_create_member() - allocate, initialize and register a member
 * record in state 'state'. Uses GFP_ATOMIC - presumably callable from
 * atomic context (TODO confirm). Returns NULL on allocation failure.
 */
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
						    u32 node, u32 port,
						    int state)
{
	struct tipc_member *m;

	m = kzalloc(sizeof(*m), GFP_ATOMIC);
	if (!m)
		return NULL;
	INIT_LIST_HEAD(&m->list);
	INIT_LIST_HEAD(&m->small_win);
	__skb_queue_head_init(&m->deferredq);
	m->group = grp;
	m->node = node;
	m->port = port;
	/* Pretend the member has acked everything broadcast so far */
	m->bc_acked = grp->bc_snd_nxt - 1;
	grp->member_cnt++;
	tipc_group_add_to_tree(grp, m);
	tipc_nlist_add(&grp->dests, m->node);
	m->state = state;
	return m;
}
/* tipc_group_add_member() - register a member first seen via topology event */
void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port)
{
	tipc_group_create_member(grp, node, port, MBR_DISCOVERED);
}
/* tipc_group_delete_member() - unlink and free one member record.
 * NOTE(review): skbs still on m->deferredq are not purged here; the caller
 * visible in this file purges the queue first - verify other callers do too.
 */
static void tipc_group_delete_member(struct tipc_group *grp,
				     struct tipc_member *m)
{
	rb_erase(&m->tree_node, &grp->members);
	grp->member_cnt--;

	/* Check if we were waiting for replicast ack from this member */
	if (grp->bc_ackers && less(m->bc_acked, grp->bc_snd_nxt - 1))
		grp->bc_ackers--;

	list_del_init(&m->list);
	list_del_init(&m->small_win);
	tipc_group_decr_active(grp, m);

	/* If last member on a node, remove node from dest list */
	if (!tipc_group_find_node(grp, m->node))
		tipc_nlist_del(&grp->dests, m->node);

	kfree(m);
}
/* tipc_group_dests() - list of nodes hosting at least one group member */
struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
{
	return &grp->dests;
}
331 void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
332 int *scope)
334 seq->type = grp->type;
335 seq->lower = grp->instance;
336 seq->upper = grp->instance;
337 *scope = grp->scope;
/* tipc_group_update_member() - charge 'len' bytes against member 'm's
 * advertised window, and (re)position the member in the group's sorted
 * small-window list if the window drops below the idle level
 */
void tipc_group_update_member(struct tipc_member *m, int len)
{
	struct tipc_group *grp = m->group;
	struct tipc_member *_m, *tmp;

	if (!tipc_group_is_receiver(m))
		return;

	m->window -= len;

	if (m->window >= ADV_IDLE)
		return;

	list_del_init(&m->small_win);

	/* Sort member into small_window members' list */
	list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
		if (_m->window > m->window)
			break;
	}
	/* If the loop ran to completion, _m refers to the list head itself,
	 * so this correctly appends at the tail in that case too
	 */
	list_add_tail(&m->small_win, &_m->small_win);
}
/* tipc_group_update_bc_members() - account a sent broadcast of 'len' bytes
 * against every receiving member, mark them as owing an ack for the
 * previous broadcast, and advance the broadcast send sequence number
 */
void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
{
	u16 prev = grp->bc_snd_nxt - 1;
	struct tipc_member *m;
	struct rb_node *n;
	u16 ackers = 0;

	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
		m = container_of(n, struct tipc_member, tree_node);
		if (tipc_group_is_receiver(m)) {
			tipc_group_update_member(m, len);
			m->bc_acked = prev;
			ackers++;
		}
	}

	/* Mark number of acknowledges to expect, if any */
	if (ack)
		grp->bc_ackers = ackers;
	grp->bc_snd_nxt++;
}
/* tipc_group_cong() - check if a unicast of 'len' bytes to dnode:dport
 * would exceed the destination member's window. Returns true (congested)
 * if the send must be deferred; in that case the sender is marked pending
 * and, if needed, an advertisement is pushed to unblock the peer.
 */
bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
		     int len, struct tipc_member **mbr)
{
	struct sk_buff_head xmitq;
	struct tipc_member *m;
	int adv, state;

	m = tipc_group_find_dest(grp, dnode, dport);
	*mbr = m;
	if (!m)
		return false;
	if (m->usr_pending)
		return true;
	if (m->window >= len)
		return false;
	m->usr_pending = true;

	/* If not fully advertised, do it now to prevent mutual blocking */
	adv = m->advertised;
	state = m->state;
	if (state < MBR_JOINED)
		return true;
	if (state == MBR_JOINED && adv == ADV_IDLE)
		return true;
	if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
		return true;
	if (state == MBR_PENDING && adv == ADV_IDLE)
		return true;
	skb_queue_head_init(&xmitq);
	tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
	tipc_node_distr_xmit(grp->net, &xmitq);
	return true;
}
/* tipc_group_bc_cong() - check if a group broadcast of 'len' bytes must be
 * deferred. Broadcast is blocked while replicast acks are outstanding, or
 * while the member with the smallest window cannot absorb the message.
 */
bool tipc_group_bc_cong(struct tipc_group *grp, int len)
{
	struct tipc_member *m = NULL;

	/* If prev bcast was replicast, reject until all receivers have acked */
	if (grp->bc_ackers)
		return true;

	if (list_empty(&grp->small_win))
		return false;

	m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
	if (m->window >= len)
		return false;

	/* Smallest window is too small, - run the unicast congestion check */
	return tipc_group_cong(grp, m->node, m->port, len, &m);
}
/* tipc_group_sort_msg() - sort msg into queue by bcast sequence number
 *
 * Broadcast/multicast messages are inserted in sequence-number order into
 * the member's deferred queue; unicasts always go to the tail.
 */
static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
{
	struct tipc_msg *_hdr, *hdr = buf_msg(skb);
	u16 bc_seqno = msg_grp_bc_seqno(hdr);
	struct sk_buff *_skb, *tmp;
	int mtyp = msg_type(hdr);

	/* Bcast/mcast may be bypassed by ucast or other bcast, - sort it in */
	if (mtyp == TIPC_GRP_BCAST_MSG || mtyp == TIPC_GRP_MCAST_MSG) {
		skb_queue_walk_safe(defq, _skb, tmp) {
			_hdr = buf_msg(_skb);
			if (!less(bc_seqno, msg_grp_bc_seqno(_hdr)))
				continue;
			__skb_queue_before(defq, _skb, skb);
			return;
		}
		/* Bcast was not bypassed, - add to tail */
	}
	/* Unicasts are never bypassed, - always add to tail */
	__skb_queue_tail(defq, skb);
}
/* tipc_group_filter_msg() - determine if we should accept arriving message
 *
 * Dequeues the first buffer from 'inputq', sorts it into the originating
 * member's deferred queue, then delivers (or drops) every message at the
 * head of that queue that is now in sequence. May queue ACK/advertisement
 * protocol messages onto 'xmitq' as a side effect. Deletes the member when
 * a held-back LEAVE event is finally delivered.
 */
void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
			   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	bool ack, deliver, update, leave = false;
	struct sk_buff_head *defq;
	struct tipc_member *m;
	struct tipc_msg *hdr;
	u32 node, port;
	int mtyp, blks;

	if (!skb)
		return;

	hdr = buf_msg(skb);
	node = msg_orignode(hdr);
	port = msg_origport(hdr);

	if (!msg_in_group(hdr))
		goto drop;

	m = tipc_group_find_member(grp, node, port);
	if (!tipc_group_is_sender(m))
		goto drop;

	/* Duplicate of an already delivered broadcast, - drop */
	if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
		goto drop;

	TIPC_SKB_CB(skb)->orig_member = m->instance;
	defq = &m->deferredq;
	tipc_group_sort_msg(skb, defq);

	while ((skb = skb_peek(defq))) {
		hdr = buf_msg(skb);
		mtyp = msg_type(hdr);
		blks = msg_blocks(hdr);
		deliver = true;
		ack = false;
		update = false;

		/* Still a gap before this message, - stop delivering */
		if (more(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
			break;

		/* Decide what to do with message */
		switch (mtyp) {
		case TIPC_GRP_MCAST_MSG:
			/* Multicast for another instance: consume the
			 * sequence number but do not deliver to user
			 */
			if (msg_nameinst(hdr) != grp->instance) {
				update = true;
				deliver = false;
			}
			/* Fall thru */
		case TIPC_GRP_BCAST_MSG:
			m->bc_rcv_nxt++;
			ack = msg_grp_bc_ack_req(hdr);
			break;
		case TIPC_GRP_UCAST_MSG:
			break;
		case TIPC_GRP_MEMBER_EVT:
			if (m->state == MBR_LEAVING)
				leave = true;
			if (!grp->events)
				deliver = false;
			break;
		default:
			break;
		}

		/* Execute decisions */
		__skb_dequeue(defq);
		if (deliver)
			__skb_queue_tail(inputq, skb);
		else
			kfree_skb(skb);

		if (ack)
			tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);

		if (leave) {
			__skb_queue_purge(defq);
			tipc_group_delete_member(grp, m);
			break;
		}
		if (!update)
			continue;

		/* Return the window blocks consumed by the skipped multicast */
		tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
	}
	return;
drop:
	kfree_skb(skb);
}
/* tipc_group_update_rcv_win() - account 'blks' blocks consumed from the
 * window advertised to member node:port, and drive the member through the
 * JOINED/PENDING/ACTIVE/RECLAIMING/REMITTED state machine, emitting
 * advertisement/reclaim protocol messages onto 'xmitq' as required
 */
void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
			       u32 port, struct sk_buff_head *xmitq)
{
	struct list_head *active = &grp->active;
	int max_active = grp->max_active;
	int reclaim_limit = max_active * 3 / 4;
	int active_cnt = grp->active_cnt;
	struct tipc_member *m, *rm, *pm;

	m = tipc_group_find_member(grp, node, port);
	if (!m)
		return;

	m->advertised -= blks;

	switch (m->state) {
	case MBR_JOINED:
		/* First, decide if member can go active */
		if (active_cnt <= max_active) {
			m->state = MBR_ACTIVE;
			list_add_tail(&m->list, active);
			grp->active_cnt++;
			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		} else {
			m->state = MBR_PENDING;
			list_add_tail(&m->list, &grp->pending);
		}

		if (active_cnt < reclaim_limit)
			break;

		/* Reclaim from oldest active member, if possible */
		if (!list_empty(active)) {
			rm = list_first_entry(active, struct tipc_member, list);
			rm->state = MBR_RECLAIMING;
			list_del_init(&rm->list);
			tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
			break;
		}
		/* Nobody to reclaim from; - revert oldest pending to JOINED */
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		list_del_init(&pm->list);
		pm->state = MBR_JOINED;
		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		break;
	case MBR_ACTIVE:
		/* Keep the active list ordered oldest-first */
		if (!list_is_last(&m->list, &grp->active))
			list_move_tail(&m->list, &grp->active);
		if (m->advertised > (ADV_ACTIVE * 3 / 4))
			break;
		tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		break;
	case MBR_REMITTED:
		/* Wait until all in-flight messages have been drained */
		if (m->advertised > ADV_IDLE)
			break;
		m->state = MBR_JOINED;
		grp->active_cnt--;
		if (m->advertised < ADV_IDLE) {
			pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		}

		if (list_empty(&grp->pending))
			return;

		/* Set oldest pending member to active and advertise */
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		pm->state = MBR_ACTIVE;
		list_move_tail(&pm->list, &grp->active);
		grp->active_cnt++;
		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		break;
	case MBR_RECLAIMING:
	case MBR_DISCOVERED:
	case MBR_JOINING:
	case MBR_LEAVING:
	default:
		break;
	}
}
/* tipc_group_proto_xmit() - build a group protocol message of type 'mtyp'
 * addressed to member 'm' and append it to 'xmitq' for later transmission
 *
 * JOIN/ADV messages grant (and account) the member's remaining window
 * deficit; JOIN/LEAVE carry the broadcast sync point, ACK the last received
 * broadcast number, and REMIT the member's remaining receive window.
 */
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
				  int mtyp, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	int adv = 0;

	skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
			      m->node, tipc_own_addr(grp->net),
			      m->port, grp->portid, 0);
	if (!skb)
		return;

	/* Window to grant depends on the member's advertisement level */
	if (m->state == MBR_ACTIVE)
		adv = ADV_ACTIVE - m->advertised;
	else if (m->state == MBR_JOINED || m->state == MBR_PENDING)
		adv = ADV_IDLE - m->advertised;

	hdr = buf_msg(skb);

	if (mtyp == GRP_JOIN_MSG) {
		msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
		msg_set_adv_win(hdr, adv);
		m->advertised += adv;
	} else if (mtyp == GRP_LEAVE_MSG) {
		msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
	} else if (mtyp == GRP_ADV_MSG) {
		msg_set_adv_win(hdr, adv);
		m->advertised += adv;
	} else if (mtyp == GRP_ACK_MSG) {
		msg_set_grp_bc_acked(hdr, m->bc_rcv_nxt);
	} else if (mtyp == GRP_REMIT_MSG) {
		msg_set_grp_remitted(hdr, m->window);
	}
	msg_set_dest_droppable(hdr, true);
	__skb_queue_tail(xmitq, skb);
}
/* tipc_group_proto_rcv() - handle a received group protocol message
 *
 * Dispatches on message type: JOIN/LEAVE drive the membership handshake
 * (possibly releasing a held-back membership event to 'inputq'), ADV/ACK
 * update flow control state, and RECLAIM/REMIT implement the window
 * reclaim handshake. Sets *usr_wakeup when the owning socket may have
 * become writable again.
 */
void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
			  struct tipc_msg *hdr, struct sk_buff_head *inputq,
			  struct sk_buff_head *xmitq)
{
	u32 node = msg_orignode(hdr);
	u32 port = msg_origport(hdr);
	struct tipc_member *m, *pm;
	struct tipc_msg *ehdr;
	u16 remitted, in_flight;

	if (!grp)
		return;

	m = tipc_group_find_member(grp, node, port);

	switch (msg_type(hdr)) {
	case GRP_JOIN_MSG:
		if (!m)
			m = tipc_group_create_member(grp, node, port,
						     MBR_QUARANTINED);
		if (!m)
			return;
		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
		m->bc_rcv_nxt = m->bc_syncpt;
		m->window += msg_adv_win(hdr);

		/* Wait until PUBLISH event is received */
		if (m->state == MBR_DISCOVERED) {
			m->state = MBR_JOINING;
		} else if (m->state == MBR_PUBLISHED) {
			/* Deliver the held-back PUBLISH event now */
			m->state = MBR_JOINED;
			*usr_wakeup = true;
			m->usr_pending = false;
			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
			ehdr = buf_msg(m->event_msg);
			msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
			__skb_queue_tail(inputq, m->event_msg);
		}
		list_del_init(&m->small_win);
		tipc_group_update_member(m, 0);
		return;
	case GRP_LEAVE_MSG:
		if (!m)
			return;
		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
		list_del_init(&m->list);
		list_del_init(&m->small_win);
		*usr_wakeup = true;

		/* Wait until WITHDRAW event is received */
		if (m->state != MBR_LEAVING) {
			tipc_group_decr_active(grp, m);
			m->state = MBR_LEAVING;
			return;
		}
		/* Otherwise deliver already received WITHDRAW event */
		ehdr = buf_msg(m->event_msg);
		msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
		__skb_queue_tail(inputq, m->event_msg);
		return;
	case GRP_ADV_MSG:
		if (!m)
			return;
		m->window += msg_adv_win(hdr);
		*usr_wakeup = m->usr_pending;
		m->usr_pending = false;
		list_del_init(&m->small_win);
		return;
	case GRP_ACK_MSG:
		if (!m)
			return;
		m->bc_acked = msg_grp_bc_acked(hdr);
		/* NOTE(review): if bc_ackers is already 0 this u16 decrement
		 * wraps - presumably duplicate/unsolicited ACKs cannot reach
		 * this point; verify
		 */
		if (--grp->bc_ackers)
			break;
		/* Last expected ack, - broadcast path is open again */
		*usr_wakeup = true;
		m->usr_pending = false;
		return;
	case GRP_RECLAIM_MSG:
		if (!m)
			return;
		*usr_wakeup = m->usr_pending;
		m->usr_pending = false;
		/* Remit our unused window back to the reclaiming peer */
		tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
		m->window = ADV_IDLE;
		return;
	case GRP_REMIT_MSG:
		if (!m || m->state != MBR_RECLAIMING)
			return;

		remitted = msg_grp_remitted(hdr);

		/* Messages preceding the REMIT still in receive queue */
		if (m->advertised > remitted) {
			m->state = MBR_REMITTED;
			in_flight = m->advertised - remitted;
			m->advertised = ADV_IDLE + in_flight;
			return;
		}
		/* This should never happen */
		if (m->advertised < remitted)
			pr_warn_ratelimited("Unexpected REMIT msg\n");

		/* All messages preceding the REMIT have been read */
		m->state = MBR_JOINED;
		grp->active_cnt--;
		m->advertised = ADV_IDLE;

		/* Set oldest pending member to active and advertise */
		if (list_empty(&grp->pending))
			return;
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		pm->state = MBR_ACTIVE;
		list_move_tail(&pm->list, &grp->active);
		grp->active_cnt++;
		if (pm->advertised <= (ADV_ACTIVE * 3 / 4))
			tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		return;
	default:
		pr_warn("Received unknown GROUP_PROTO message\n");
	}
}
/* tipc_group_member_evt() - receive and handle a member up/down event
 *
 * Converts a topology service event into a TIPC_GRP_MEMBER_EVT message for
 * the user. PUBLISH events may be held back until the matching JOIN message
 * arrives; WITHDRAWN events may be held back until the matching LEAVE
 * message arrives (only possible while the peer node is up). Also refreshes
 * the socket receive buffer limit via *sk_rcvbuf.
 */
void tipc_group_member_evt(struct tipc_group *grp,
			   bool *usr_wakeup,
			   int *sk_rcvbuf,
			   struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_event *evt = (void *)msg_data(hdr);
	u32 instance = evt->found_lower;
	u32 node = evt->port.node;
	u32 port = evt->port.ref;
	int event = evt->event;
	struct tipc_member *m;
	struct net *net;
	bool node_up;
	u32 self;

	if (!grp)
		goto drop;

	/* Ignore our own publication unless loopback delivery is wanted */
	net = grp->net;
	self = tipc_own_addr(net);
	if (!grp->loopback && node == self && port == grp->portid)
		goto drop;

	/* Convert message before delivery to user */
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
	msg_set_type(hdr, TIPC_GRP_MEMBER_EVT);
	msg_set_origport(hdr, port);
	msg_set_orignode(hdr, node);
	msg_set_nametype(hdr, grp->type);
	msg_set_grp_evt(hdr, event);

	m = tipc_group_find_member(grp, node, port);

	if (event == TIPC_PUBLISHED) {
		if (!m)
			m = tipc_group_create_member(grp, node, port,
						     MBR_DISCOVERED);
		if (!m)
			goto drop;

		/* Hold back event if JOIN message not yet received */
		if (m->state == MBR_DISCOVERED) {
			m->event_msg = skb;
			m->state = MBR_PUBLISHED;
		} else {
			msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
			__skb_queue_tail(inputq, skb);
			m->state = MBR_JOINED;
			*usr_wakeup = true;
			m->usr_pending = false;
		}
		m->instance = instance;
		TIPC_SKB_CB(skb)->orig_member = m->instance;
		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
		tipc_group_update_member(m, 0);
	} else if (event == TIPC_WITHDRAWN) {
		if (!m)
			goto drop;

		TIPC_SKB_CB(skb)->orig_member = m->instance;

		*usr_wakeup = true;
		m->usr_pending = false;
		node_up = tipc_node_is_up(net, node);
		m->event_msg = NULL;

		if (node_up) {
			/* Hold back event if a LEAVE msg should be expected */
			if (m->state != MBR_LEAVING) {
				m->event_msg = skb;
				tipc_group_decr_active(grp, m);
				m->state = MBR_LEAVING;
			} else {
				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
				__skb_queue_tail(inputq, skb);
			}
		} else {
			/* Node is down, - no LEAVE msg will ever arrive */
			if (m->state != MBR_LEAVING) {
				tipc_group_decr_active(grp, m);
				m->state = MBR_LEAVING;
				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
			} else {
				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
			}
			__skb_queue_tail(inputq, skb);
		}
		list_del_init(&m->list);
		list_del_init(&m->small_win);
	}
	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
	return;
drop:
	kfree_skb(skb);
}