net/tipc/link.c  (linux-2.6.git, blob a79c755cb41714bf40c66de615ce6d0cc737cb3b)
1 /*
2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "name_distr.h"
41 #include "discover.h"
42 #include "config.h"
45 * Error message prefixes
47 static const char *link_co_err = "Link changeover error, ";
48 static const char *link_rst_msg = "Resetting link ";
49 static const char *link_unk_evt = "Unknown link event ";
52 * Out-of-range value for link session numbers
54 #define INVALID_SESSION 0x10000
57 * Link state events:
59 #define STARTING_EVT 856384768 /* link processing trigger */
60 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
61 #define TIMEOUT_EVT 560817u /* link timer expired */
64 * The following two 'message types' are really just implementation
65 * data conveniently stored in the message header.
66 * They must not be considered part of the protocol
68 #define OPEN_MSG 0
69 #define CLOSED_MSG 1
72 * State value stored in 'exp_msg_count'
74 #define START_CHANGEOVER 100000u
76 /**
77 * struct tipc_link_name - deconstructed link name
78 * @addr_local: network address of node at this end
79 * @if_local: name of interface at this end
80 * @addr_peer: network address of node at far end
81 * @if_peer: name of interface at far end
83 struct tipc_link_name {
84 u32 addr_local;
85 char if_local[TIPC_MAX_IF_NAME];
86 u32 addr_peer;
87 char if_peer[TIPC_MAX_IF_NAME];
90 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
91 struct sk_buff *buf);
92 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
93 static int link_recv_changeover_msg(struct tipc_link **l_ptr,
94 struct sk_buff **buf);
95 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
96 static int link_send_sections_long(struct tipc_port *sender,
97 struct iovec const *msg_sect,
98 u32 num_sect, unsigned int total_len,
99 u32 destnode);
100 static void link_check_defragm_bufs(struct tipc_link *l_ptr);
101 static void link_state_event(struct tipc_link *l_ptr, u32 event);
102 static void link_reset_statistics(struct tipc_link *l_ptr);
103 static void link_print(struct tipc_link *l_ptr, const char *str);
104 static void link_start(struct tipc_link *l_ptr);
105 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
108 * Simple link routines
110 static unsigned int align(unsigned int i)
112 return (i + 3) & ~3u;
115 static void link_init_max_pkt(struct tipc_link *l_ptr)
117 u32 max_pkt;
119 max_pkt = (l_ptr->b_ptr->mtu & ~3);
120 if (max_pkt > MAX_MSG_SIZE)
121 max_pkt = MAX_MSG_SIZE;
123 l_ptr->max_pkt_target = max_pkt;
124 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
125 l_ptr->max_pkt = l_ptr->max_pkt_target;
126 else
127 l_ptr->max_pkt = MAX_PKT_DEFAULT;
129 l_ptr->max_pkt_probes = 0;
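/*
 * Packet sequence numbers live in a 16-bit space; mod() masks values into
 * that range. The two helpers below return the sequence number of the next
 * packet to be sent and of the last packet already sent.
 */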
132 static u32 link_next_sent(struct tipc_link *l_ptr)
134 if (l_ptr->next_out)
135 return buf_seqno(l_ptr->next_out);
136 return mod(l_ptr->next_out_no);
139 static u32 link_last_sent(struct tipc_link *l_ptr)
141 return mod(link_next_sent(l_ptr) - 1);
145 * Simple non-static link routines (i.e. referenced outside this file)
147 int tipc_link_is_up(struct tipc_link *l_ptr)
149 if (!l_ptr)
150 return 0;
151 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
154 int tipc_link_is_active(struct tipc_link *l_ptr)
156 return (l_ptr->owner->active_links[0] == l_ptr) ||
157 (l_ptr->owner->active_links[1] == l_ptr);
161 * link_name_validate - validate & (optionally) deconstruct tipc_link name
162 * @name: ptr to link name string
163 * @name_parts: ptr to area for link name components (or NULL if not needed)
165 * Returns 1 if link name is valid, otherwise 0.
167 static int link_name_validate(const char *name,
168 struct tipc_link_name *name_parts)
170 char name_copy[TIPC_MAX_LINK_NAME];
171 char *addr_local;
172 char *if_local;
173 char *addr_peer;
174 char *if_peer;
175 char dummy;
176 u32 z_local, c_local, n_local;
177 u32 z_peer, c_peer, n_peer;
178 u32 if_local_len;
179 u32 if_peer_len;
181 /* copy link name & ensure length is OK */
182 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
183 /* need above in case non-POSIX strncpy() doesn't pad with nulls */
184 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
185 if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
186 return 0;
188 /* ensure all component parts of link name are present */
189 addr_local = name_copy;
190 if_local = strchr(addr_local, ':');
191 if (if_local == NULL)
192 return 0;
193 *(if_local++) = 0;
194 addr_peer = strchr(if_local, '-');
195 if (addr_peer == NULL)
196 return 0;
197 *(addr_peer++) = 0;
198 if_local_len = addr_peer - if_local;
199 if_peer = strchr(addr_peer, ':');
200 if (if_peer == NULL)
201 return 0;
202 *(if_peer++) = 0;
203 if_peer_len = strlen(if_peer) + 1;
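/*
 * At this point the name has been split into its "<z>.<c>.<n>:<if>-<z>.<c>.<n>:<if>"
 * components, e.g. "1.1.1:eth0-1.1.2:eth0" (illustrative values only).
 */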
205 /* validate component parts of link name */
206 if ((sscanf(addr_local, "%u.%u.%u%c",
207 &z_local, &c_local, &n_local, &dummy) != 3) ||
208 (sscanf(addr_peer, "%u.%u.%u%c",
209 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
210 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
211 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
212 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
213 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME))
214 return 0;
216 /* return link name components, if necessary */
217 if (name_parts) {
218 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
219 strcpy(name_parts->if_local, if_local);
220 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
221 strcpy(name_parts->if_peer, if_peer);
223 return 1;
227 * link_timeout - handle expiration of link timer
228 * @l_ptr: pointer to link
230 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock
231 * with tipc_link_delete(). (There is no risk that the node will be deleted by
232 * another thread because tipc_link_delete() always cancels the link timer before
233 * tipc_node_delete() is called.)
235 static void link_timeout(struct tipc_link *l_ptr)
237 tipc_node_lock(l_ptr->owner);
239 /* update counters used in statistical profiling of send traffic */
240 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
241 l_ptr->stats.queue_sz_counts++;
243 if (l_ptr->first_out) {
244 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
245 u32 length = msg_size(msg);
247 if ((msg_user(msg) == MSG_FRAGMENTER) &&
248 (msg_type(msg) == FIRST_FRAGMENT)) {
249 length = msg_size(msg_get_wrapped(msg));
251 if (length) {
252 l_ptr->stats.msg_lengths_total += length;
253 l_ptr->stats.msg_length_counts++;
254 if (length <= 64)
255 l_ptr->stats.msg_length_profile[0]++;
256 else if (length <= 256)
257 l_ptr->stats.msg_length_profile[1]++;
258 else if (length <= 1024)
259 l_ptr->stats.msg_length_profile[2]++;
260 else if (length <= 4096)
261 l_ptr->stats.msg_length_profile[3]++;
262 else if (length <= 16384)
263 l_ptr->stats.msg_length_profile[4]++;
264 else if (length <= 32768)
265 l_ptr->stats.msg_length_profile[5]++;
266 else
267 l_ptr->stats.msg_length_profile[6]++;
271 /* do all other link processing performed on a periodic basis */
272 link_check_defragm_bufs(l_ptr);
274 link_state_event(l_ptr, TIMEOUT_EVT);
276 if (l_ptr->next_out)
277 tipc_link_push_queue(l_ptr);
279 tipc_node_unlock(l_ptr->owner);
282 static void link_set_timer(struct tipc_link *l_ptr, u32 time)
284 k_start_timer(&l_ptr->timer, time);
288 * tipc_link_create - create a new link
289 * @n_ptr: pointer to associated node
290 * @b_ptr: pointer to associated bearer
291 * @media_addr: media address to use when sending messages over link
293 * Returns pointer to link.
295 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
296 struct tipc_bearer *b_ptr,
297 const struct tipc_media_addr *media_addr)
299 struct tipc_link *l_ptr;
300 struct tipc_msg *msg;
301 char *if_name;
302 char addr_string[16];
303 u32 peer = n_ptr->addr;
305 if (n_ptr->link_cnt >= 2) {
306 tipc_addr_string_fill(addr_string, n_ptr->addr);
307 pr_err("Attempt to establish third link to %s\n", addr_string);
308 return NULL;
311 if (n_ptr->links[b_ptr->identity]) {
312 tipc_addr_string_fill(addr_string, n_ptr->addr);
313 pr_err("Attempt to establish second link on <%s> to %s\n",
314 b_ptr->name, addr_string);
315 return NULL;
318 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
319 if (!l_ptr) {
320 pr_warn("Link creation failed, no memory\n");
321 return NULL;
324 l_ptr->addr = peer;
325 if_name = strchr(b_ptr->name, ':') + 1;
326 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
327 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
328 tipc_node(tipc_own_addr),
329 if_name,
330 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
331 /* note: peer i/f name is updated by reset/activate message */
332 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
333 l_ptr->owner = n_ptr;
334 l_ptr->checkpoint = 1;
335 l_ptr->peer_session = INVALID_SESSION;
336 l_ptr->b_ptr = b_ptr;
337 link_set_supervision_props(l_ptr, b_ptr->tolerance);
338 l_ptr->state = RESET_UNKNOWN;
340 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
341 msg = l_ptr->pmsg;
342 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
343 msg_set_size(msg, sizeof(l_ptr->proto_msg));
344 msg_set_session(msg, (tipc_random & 0xffff));
345 msg_set_bearer_id(msg, b_ptr->identity);
346 strcpy((char *)msg_data(msg), if_name);
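/*
 * The local interface name travels in the reset/activate messages built from
 * this template; the peer uses it to replace the "unknown" half of its own
 * copy of the link name (see link_recv_proto_msg()).
 */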
348 l_ptr->priority = b_ptr->priority;
349 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
351 link_init_max_pkt(l_ptr);
353 l_ptr->next_out_no = 1;
354 INIT_LIST_HEAD(&l_ptr->waiting_ports);
356 link_reset_statistics(l_ptr);
358 tipc_node_attach_link(n_ptr, l_ptr);
360 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
361 list_add_tail(&l_ptr->link_list, &b_ptr->links);
362 tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
364 return l_ptr;
368 * tipc_link_delete - delete a link
369 * @l_ptr: pointer to link
371 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
372 * This routine must not grab the node lock until after link timer cancellation
373 * to avoid a potential deadlock situation.
375 void tipc_link_delete(struct tipc_link *l_ptr)
377 if (!l_ptr) {
378 pr_err("Attempt to delete non-existent link\n");
379 return;
382 k_cancel_timer(&l_ptr->timer);
384 tipc_node_lock(l_ptr->owner);
385 tipc_link_reset(l_ptr);
386 tipc_node_detach_link(l_ptr->owner, l_ptr);
387 tipc_link_stop(l_ptr);
388 list_del_init(&l_ptr->link_list);
389 tipc_node_unlock(l_ptr->owner);
390 k_term_timer(&l_ptr->timer);
391 kfree(l_ptr);
394 static void link_start(struct tipc_link *l_ptr)
396 tipc_node_lock(l_ptr->owner);
397 link_state_event(l_ptr, STARTING_EVT);
398 tipc_node_unlock(l_ptr->owner);
402 * link_schedule_port - schedule port for deferred sending
403 * @l_ptr: pointer to link
404 * @origport: reference to sending port
405 * @sz: amount of data to be sent
407 * Schedules port for renewed sending of messages after link congestion
408 * has abated.
410 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
412 struct tipc_port *p_ptr;
414 spin_lock_bh(&tipc_port_list_lock);
415 p_ptr = tipc_port_lock(origport);
416 if (p_ptr) {
417 if (!p_ptr->wakeup)
418 goto exit;
419 if (!list_empty(&p_ptr->wait_list))
420 goto exit;
421 p_ptr->congested = 1;
422 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
423 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
424 l_ptr->stats.link_congs++;
425 exit:
426 tipc_port_unlock(p_ptr);
428 spin_unlock_bh(&tipc_port_list_lock);
429 return -ELINKCONG;
432 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
434 struct tipc_port *p_ptr;
435 struct tipc_port *temp_p_ptr;
436 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
438 if (all)
439 win = 100000;
440 if (win <= 0)
441 return;
442 if (!spin_trylock_bh(&tipc_port_list_lock))
443 return;
444 if (link_congested(l_ptr))
445 goto exit;
446 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
447 wait_list) {
448 if (win <= 0)
449 break;
450 list_del_init(&p_ptr->wait_list);
451 spin_lock_bh(p_ptr->lock);
452 p_ptr->congested = 0;
453 p_ptr->wakeup(p_ptr);
454 win -= p_ptr->waiting_pkts;
455 spin_unlock_bh(p_ptr->lock);
458 exit:
459 spin_unlock_bh(&tipc_port_list_lock);
463 * link_release_outqueue - purge link's outbound message queue
464 * @l_ptr: pointer to link
466 static void link_release_outqueue(struct tipc_link *l_ptr)
468 struct sk_buff *buf = l_ptr->first_out;
469 struct sk_buff *next;
471 while (buf) {
472 next = buf->next;
473 kfree_skb(buf);
474 buf = next;
476 l_ptr->first_out = NULL;
477 l_ptr->out_queue_size = 0;
481 * tipc_link_reset_fragments - purge link's inbound message fragments queue
482 * @l_ptr: pointer to link
484 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
486 struct sk_buff *buf = l_ptr->defragm_buf;
487 struct sk_buff *next;
489 while (buf) {
490 next = buf->next;
491 kfree_skb(buf);
492 buf = next;
494 l_ptr->defragm_buf = NULL;
498 * tipc_link_stop - purge all inbound and outbound messages associated with link
499 * @l_ptr: pointer to link
501 void tipc_link_stop(struct tipc_link *l_ptr)
503 struct sk_buff *buf;
504 struct sk_buff *next;
506 buf = l_ptr->oldest_deferred_in;
507 while (buf) {
508 next = buf->next;
509 kfree_skb(buf);
510 buf = next;
513 buf = l_ptr->first_out;
514 while (buf) {
515 next = buf->next;
516 kfree_skb(buf);
517 buf = next;
520 tipc_link_reset_fragments(l_ptr);
522 kfree_skb(l_ptr->proto_msg_queue);
523 l_ptr->proto_msg_queue = NULL;
526 void tipc_link_reset(struct tipc_link *l_ptr)
528 struct sk_buff *buf;
529 u32 prev_state = l_ptr->state;
530 u32 checkpoint = l_ptr->next_in_no;
531 int was_active_link = tipc_link_is_active(l_ptr);
533 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
535 /* Link is down, accept any session */
536 l_ptr->peer_session = INVALID_SESSION;
538 /* Prepare for max packet size negotiation */
539 link_init_max_pkt(l_ptr);
541 l_ptr->state = RESET_UNKNOWN;
543 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
544 return;
546 tipc_node_link_down(l_ptr->owner, l_ptr);
547 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
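/*
 * If this was an active link and a parallel link to the same node is still up
 * (and changeover is permitted), record where reception stopped and prime
 * exp_msg_count so the changeover of traffic onto the remaining link can be
 * tracked.
 */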
549 if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
550 l_ptr->owner->permit_changeover) {
551 l_ptr->reset_checkpoint = checkpoint;
552 l_ptr->exp_msg_count = START_CHANGEOVER;
555 /* Clean up all queues: */
556 link_release_outqueue(l_ptr);
557 kfree_skb(l_ptr->proto_msg_queue);
558 l_ptr->proto_msg_queue = NULL;
559 buf = l_ptr->oldest_deferred_in;
560 while (buf) {
561 struct sk_buff *next = buf->next;
562 kfree_skb(buf);
563 buf = next;
565 if (!list_empty(&l_ptr->waiting_ports))
566 tipc_link_wakeup_ports(l_ptr, 1);
568 l_ptr->retransm_queue_head = 0;
569 l_ptr->retransm_queue_size = 0;
570 l_ptr->last_out = NULL;
571 l_ptr->first_out = NULL;
572 l_ptr->next_out = NULL;
573 l_ptr->unacked_window = 0;
574 l_ptr->checkpoint = 1;
575 l_ptr->next_out_no = 1;
576 l_ptr->deferred_inqueue_sz = 0;
577 l_ptr->oldest_deferred_in = NULL;
578 l_ptr->newest_deferred_in = NULL;
579 l_ptr->fsm_msg_cnt = 0;
580 l_ptr->stale_count = 0;
581 link_reset_statistics(l_ptr);
585 static void link_activate(struct tipc_link *l_ptr)
587 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
588 tipc_node_link_up(l_ptr->owner, l_ptr);
589 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
593 * link_state_event - link finite state machine
594 * @l_ptr: pointer to link
595 * @event: state machine event to process
597 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
599 struct tipc_link *other;
600 u32 cont_intv = l_ptr->continuity_interval;
602 if (!l_ptr->started && (event != STARTING_EVT))
603 return; /* Not yet. */
605 if (link_blocked(l_ptr)) {
606 if (event == TIMEOUT_EVT)
607 link_set_timer(l_ptr, cont_intv);
608 return; /* Changeover going on */
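/*
 * State machine overview: in the WORKING states a timeout with no new traffic
 * triggers a probe (STATE_MSG with the probe bit set); repeated probe timeouts
 * up to 'abort_limit' reset the link. In the RESET states the endpoints
 * exchange RESET/ACTIVATE messages until the link can be activated again.
 */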
611 switch (l_ptr->state) {
612 case WORKING_WORKING:
613 switch (event) {
614 case TRAFFIC_MSG_EVT:
615 case ACTIVATE_MSG:
616 break;
617 case TIMEOUT_EVT:
618 if (l_ptr->next_in_no != l_ptr->checkpoint) {
619 l_ptr->checkpoint = l_ptr->next_in_no;
620 if (tipc_bclink_acks_missing(l_ptr->owner)) {
621 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
622 0, 0, 0, 0, 0);
623 l_ptr->fsm_msg_cnt++;
624 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
625 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
626 1, 0, 0, 0, 0);
627 l_ptr->fsm_msg_cnt++;
629 link_set_timer(l_ptr, cont_intv);
630 break;
632 l_ptr->state = WORKING_UNKNOWN;
633 l_ptr->fsm_msg_cnt = 0;
634 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
635 l_ptr->fsm_msg_cnt++;
636 link_set_timer(l_ptr, cont_intv / 4);
637 break;
638 case RESET_MSG:
639 pr_info("%s<%s>, requested by peer\n", link_rst_msg,
640 l_ptr->name);
641 tipc_link_reset(l_ptr);
642 l_ptr->state = RESET_RESET;
643 l_ptr->fsm_msg_cnt = 0;
644 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
645 l_ptr->fsm_msg_cnt++;
646 link_set_timer(l_ptr, cont_intv);
647 break;
648 default:
649 pr_err("%s%u in WW state\n", link_unk_evt, event);
651 break;
652 case WORKING_UNKNOWN:
653 switch (event) {
654 case TRAFFIC_MSG_EVT:
655 case ACTIVATE_MSG:
656 l_ptr->state = WORKING_WORKING;
657 l_ptr->fsm_msg_cnt = 0;
658 link_set_timer(l_ptr, cont_intv);
659 break;
660 case RESET_MSG:
661 pr_info("%s<%s>, requested by peer while probing\n",
662 link_rst_msg, l_ptr->name);
663 tipc_link_reset(l_ptr);
664 l_ptr->state = RESET_RESET;
665 l_ptr->fsm_msg_cnt = 0;
666 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
667 l_ptr->fsm_msg_cnt++;
668 link_set_timer(l_ptr, cont_intv);
669 break;
670 case TIMEOUT_EVT:
671 if (l_ptr->next_in_no != l_ptr->checkpoint) {
672 l_ptr->state = WORKING_WORKING;
673 l_ptr->fsm_msg_cnt = 0;
674 l_ptr->checkpoint = l_ptr->next_in_no;
675 if (tipc_bclink_acks_missing(l_ptr->owner)) {
676 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
677 0, 0, 0, 0, 0);
678 l_ptr->fsm_msg_cnt++;
680 link_set_timer(l_ptr, cont_intv);
681 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
682 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
683 1, 0, 0, 0, 0);
684 l_ptr->fsm_msg_cnt++;
685 link_set_timer(l_ptr, cont_intv / 4);
686 } else { /* Link has failed */
687 pr_warn("%s<%s>, peer not responding\n",
688 link_rst_msg, l_ptr->name);
689 tipc_link_reset(l_ptr);
690 l_ptr->state = RESET_UNKNOWN;
691 l_ptr->fsm_msg_cnt = 0;
692 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
693 0, 0, 0, 0, 0);
694 l_ptr->fsm_msg_cnt++;
695 link_set_timer(l_ptr, cont_intv);
697 break;
698 default:
699 pr_err("%s%u in WU state\n", link_unk_evt, event);
701 break;
702 case RESET_UNKNOWN:
703 switch (event) {
704 case TRAFFIC_MSG_EVT:
705 break;
706 case ACTIVATE_MSG:
707 other = l_ptr->owner->active_links[0];
708 if (other && link_working_unknown(other))
709 break;
710 l_ptr->state = WORKING_WORKING;
711 l_ptr->fsm_msg_cnt = 0;
712 link_activate(l_ptr);
713 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
714 l_ptr->fsm_msg_cnt++;
715 link_set_timer(l_ptr, cont_intv);
716 break;
717 case RESET_MSG:
718 l_ptr->state = RESET_RESET;
719 l_ptr->fsm_msg_cnt = 0;
720 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
721 l_ptr->fsm_msg_cnt++;
722 link_set_timer(l_ptr, cont_intv);
723 break;
724 case STARTING_EVT:
725 l_ptr->started = 1;
726 /* fall through */
727 case TIMEOUT_EVT:
728 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
729 l_ptr->fsm_msg_cnt++;
730 link_set_timer(l_ptr, cont_intv);
731 break;
732 default:
733 pr_err("%s%u in RU state\n", link_unk_evt, event);
735 break;
736 case RESET_RESET:
737 switch (event) {
738 case TRAFFIC_MSG_EVT:
739 case ACTIVATE_MSG:
740 other = l_ptr->owner->active_links[0];
741 if (other && link_working_unknown(other))
742 break;
743 l_ptr->state = WORKING_WORKING;
744 l_ptr->fsm_msg_cnt = 0;
745 link_activate(l_ptr);
746 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
747 l_ptr->fsm_msg_cnt++;
748 link_set_timer(l_ptr, cont_intv);
749 break;
750 case RESET_MSG:
751 break;
752 case TIMEOUT_EVT:
753 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
754 l_ptr->fsm_msg_cnt++;
755 link_set_timer(l_ptr, cont_intv);
756 break;
757 default:
758 pr_err("%s%u in RR state\n", link_unk_evt, event);
760 break;
761 default:
762 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
767 * link_bundle_buf(): Append contents of a buffer to
768 * the tail of an existing one.
770 static int link_bundle_buf(struct tipc_link *l_ptr,
771 struct sk_buff *bundler,
772 struct sk_buff *buf)
774 struct tipc_msg *bundler_msg = buf_msg(bundler);
775 struct tipc_msg *msg = buf_msg(buf);
776 u32 size = msg_size(msg);
777 u32 bundle_size = msg_size(bundler_msg);
778 u32 to_pos = align(bundle_size);
779 u32 pad = to_pos - bundle_size;
781 if (msg_user(bundler_msg) != MSG_BUNDLER)
782 return 0;
783 if (msg_type(bundler_msg) != OPEN_MSG)
784 return 0;
785 if (skb_tailroom(bundler) < (pad + size))
786 return 0;
787 if (l_ptr->max_pkt < (to_pos + size))
788 return 0;
790 skb_put(bundler, pad + size);
791 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
792 msg_set_size(bundler_msg, to_pos + size);
793 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
794 kfree_skb(buf);
795 l_ptr->stats.sent_bundled++;
796 return 1;
799 static void link_add_to_outqueue(struct tipc_link *l_ptr,
800 struct sk_buff *buf,
801 struct tipc_msg *msg)
803 u32 ack = mod(l_ptr->next_in_no - 1);
804 u32 seqno = mod(l_ptr->next_out_no++);
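/*
 * Word 2 of the header packs the last in-sequence packet we received (upper
 * 16 bits, the acknowledgement) together with this packet's own sequence
 * number (lower 16 bits).
 */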
806 msg_set_word(msg, 2, ((ack << 16) | seqno));
807 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
808 buf->next = NULL;
809 if (l_ptr->first_out) {
810 l_ptr->last_out->next = buf;
811 l_ptr->last_out = buf;
812 } else
813 l_ptr->first_out = l_ptr->last_out = buf;
815 l_ptr->out_queue_size++;
816 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
817 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
820 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
821 struct sk_buff *buf_chain,
822 u32 long_msgno)
824 struct sk_buff *buf;
825 struct tipc_msg *msg;
827 if (!l_ptr->next_out)
828 l_ptr->next_out = buf_chain;
829 while (buf_chain) {
830 buf = buf_chain;
831 buf_chain = buf_chain->next;
833 msg = buf_msg(buf);
834 msg_set_long_msgno(msg, long_msgno);
835 link_add_to_outqueue(l_ptr, buf, msg);
840 * tipc_link_send_buf() is the 'full path' for messages, called from
841 * inside TIPC when the 'fast path' in tipc_send_buf
842 * has failed, and from link_send()
844 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
846 struct tipc_msg *msg = buf_msg(buf);
847 u32 size = msg_size(msg);
848 u32 dsz = msg_data_sz(msg);
849 u32 queue_size = l_ptr->out_queue_size;
850 u32 imp = tipc_msg_tot_importance(msg);
851 u32 queue_limit = l_ptr->queue_limit[imp];
852 u32 max_packet = l_ptr->max_pkt;
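/*
 * Overview: a message that hits the queue limit for its importance level is
 * discarded -- ordinary senders (up to critical importance) get -ELINKCONG
 * and their port is scheduled for wakeup, while overload from users above
 * CONN_MANAGER resets the link. Oversized messages are fragmented; everything
 * else is queued and, if neither link nor bearer is congested, sent at once,
 * otherwise bundling is attempted.
 */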
854 /* Match msg importance against queue limits: */
855 if (unlikely(queue_size >= queue_limit)) {
856 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
857 link_schedule_port(l_ptr, msg_origport(msg), size);
858 kfree_skb(buf);
859 return -ELINKCONG;
861 kfree_skb(buf);
862 if (imp > CONN_MANAGER) {
863 pr_warn("%s<%s>, send queue full\n", link_rst_msg,
864 l_ptr->name);
865 tipc_link_reset(l_ptr);
867 return dsz;
870 /* Fragmentation needed ? */
871 if (size > max_packet)
872 return link_send_long_buf(l_ptr, buf);
874 /* Packet can be queued or sent. */
875 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
876 !link_congested(l_ptr))) {
877 link_add_to_outqueue(l_ptr, buf, msg);
879 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
880 l_ptr->unacked_window = 0;
881 } else {
882 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
883 l_ptr->stats.bearer_congs++;
884 l_ptr->next_out = buf;
886 return dsz;
888 /* Congestion: can message be bundled ? */
889 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
890 (msg_user(msg) != MSG_FRAGMENTER)) {
892 /* Try adding message to an existing bundle */
893 if (l_ptr->next_out &&
894 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
895 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
896 return dsz;
899 /* Try creating a new bundle */
900 if (size <= max_packet * 2 / 3) {
901 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
902 struct tipc_msg bundler_hdr;
904 if (bundler) {
905 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
906 INT_H_SIZE, l_ptr->addr);
907 skb_copy_to_linear_data(bundler, &bundler_hdr,
908 INT_H_SIZE);
909 skb_trim(bundler, INT_H_SIZE);
910 link_bundle_buf(l_ptr, bundler, buf);
911 buf = bundler;
912 msg = buf_msg(buf);
913 l_ptr->stats.sent_bundles++;
917 if (!l_ptr->next_out)
918 l_ptr->next_out = buf;
919 link_add_to_outqueue(l_ptr, buf, msg);
920 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
921 return dsz;
925 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
926 * not been selected yet, and the owner node is not locked.
927 * Called by TIPC internal users, e.g. the name distributor
929 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
931 struct tipc_link *l_ptr;
932 struct tipc_node *n_ptr;
933 int res = -ELINKCONG;
935 read_lock_bh(&tipc_net_lock);
936 n_ptr = tipc_node_find(dest);
937 if (n_ptr) {
938 tipc_node_lock(n_ptr);
939 l_ptr = n_ptr->active_links[selector & 1];
940 if (l_ptr)
941 res = tipc_link_send_buf(l_ptr, buf);
942 else
943 kfree_skb(buf);
944 tipc_node_unlock(n_ptr);
945 } else {
946 kfree_skb(buf);
948 read_unlock_bh(&tipc_net_lock);
949 return res;
953 * tipc_link_send_names - send name table entries to new neighbor
955 * Send routine for bulk delivery of name table messages when contact
956 * with a new neighbor occurs. No link congestion checking is performed
957 * because name table messages *must* be delivered. The messages must be
958 * small enough not to require fragmentation.
959 * Called without any locks held.
961 void tipc_link_send_names(struct list_head *message_list, u32 dest)
963 struct tipc_node *n_ptr;
964 struct tipc_link *l_ptr;
965 struct sk_buff *buf;
966 struct sk_buff *temp_buf;
968 if (list_empty(message_list))
969 return;
971 read_lock_bh(&tipc_net_lock);
972 n_ptr = tipc_node_find(dest);
973 if (n_ptr) {
974 tipc_node_lock(n_ptr);
975 l_ptr = n_ptr->active_links[0];
976 if (l_ptr) {
977 /* convert circular list to linear list */
978 ((struct sk_buff *)message_list->prev)->next = NULL;
979 link_add_chain_to_outqueue(l_ptr,
980 (struct sk_buff *)message_list->next, 0);
981 tipc_link_push_queue(l_ptr);
982 INIT_LIST_HEAD(message_list);
984 tipc_node_unlock(n_ptr);
986 read_unlock_bh(&tipc_net_lock);
988 /* discard the messages if they couldn't be sent */
989 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
990 list_del((struct list_head *)buf);
991 kfree_skb(buf);
996 * link_send_buf_fast: Entry for data messages where the
997 * destination link is known and the header is complete,
998 * including the total message length. Very time critical.
999 * Link is locked. Returns user data length.
1001 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
1002 u32 *used_max_pkt)
1004 struct tipc_msg *msg = buf_msg(buf);
1005 int res = msg_data_sz(msg);
1007 if (likely(!link_congested(l_ptr))) {
1008 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1009 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1010 link_add_to_outqueue(l_ptr, buf, msg);
1011 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1012 &l_ptr->media_addr))) {
1013 l_ptr->unacked_window = 0;
1014 return res;
1016 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1017 l_ptr->stats.bearer_congs++;
1018 l_ptr->next_out = buf;
1019 return res;
1021 } else
1022 *used_max_pkt = l_ptr->max_pkt;
1024 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1028 * tipc_send_buf_fast: Entry for data messages where the
1029 * destination node is known and the header is complete,
1030 * including the total message length.
1031 * Returns user data length.
1033 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1035 struct tipc_link *l_ptr;
1036 struct tipc_node *n_ptr;
1037 int res;
1038 u32 selector = msg_origport(buf_msg(buf)) & 1;
1039 u32 dummy;
1041 read_lock_bh(&tipc_net_lock);
1042 n_ptr = tipc_node_find(destnode);
1043 if (likely(n_ptr)) {
1044 tipc_node_lock(n_ptr);
1045 l_ptr = n_ptr->active_links[selector];
1046 if (likely(l_ptr)) {
1047 res = link_send_buf_fast(l_ptr, buf, &dummy);
1048 tipc_node_unlock(n_ptr);
1049 read_unlock_bh(&tipc_net_lock);
1050 return res;
1052 tipc_node_unlock(n_ptr);
1054 read_unlock_bh(&tipc_net_lock);
1055 res = msg_data_sz(buf_msg(buf));
1056 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1057 return res;
1062 * tipc_link_send_sections_fast: Entry for messages where the
1063 * destination processor is known and the header is complete,
1064 * except for total message length.
1065 * Returns user data length or errno.
1067 int tipc_link_send_sections_fast(struct tipc_port *sender,
1068 struct iovec const *msg_sect,
1069 const u32 num_sect,
1070 unsigned int total_len,
1071 u32 destaddr)
1073 struct tipc_msg *hdr = &sender->phdr;
1074 struct tipc_link *l_ptr;
1075 struct sk_buff *buf;
1076 struct tipc_node *node;
1077 int res;
1078 u32 selector = msg_origport(hdr) & 1;
1080 again:
1082 * Try building message using port's max_pkt hint.
1083 * (Must not hold any locks while building message.)
1085 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1086 sender->max_pkt, !sender->user_port, &buf);
1088 read_lock_bh(&tipc_net_lock);
1089 node = tipc_node_find(destaddr);
1090 if (likely(node)) {
1091 tipc_node_lock(node);
1092 l_ptr = node->active_links[selector];
1093 if (likely(l_ptr)) {
1094 if (likely(buf)) {
1095 res = link_send_buf_fast(l_ptr, buf,
1096 &sender->max_pkt);
1097 exit:
1098 tipc_node_unlock(node);
1099 read_unlock_bh(&tipc_net_lock);
1100 return res;
1103 /* Exit if build request was invalid */
1104 if (unlikely(res < 0))
1105 goto exit;
1107 /* Exit if link (or bearer) is congested */
1108 if (link_congested(l_ptr) ||
1109 !list_empty(&l_ptr->b_ptr->cong_links)) {
1110 res = link_schedule_port(l_ptr,
1111 sender->ref, res);
1112 goto exit;
1116 * Message size exceeds max_pkt hint; update hint,
1117 * then re-try fast path or fragment the message
1119 sender->max_pkt = l_ptr->max_pkt;
1120 tipc_node_unlock(node);
1121 read_unlock_bh(&tipc_net_lock);
1124 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1125 goto again;
1127 return link_send_sections_long(sender, msg_sect,
1128 num_sect, total_len,
1129 destaddr);
1131 tipc_node_unlock(node);
1133 read_unlock_bh(&tipc_net_lock);
1135 /* Couldn't find a link to the destination node */
1136 if (buf)
1137 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1138 if (res >= 0)
1139 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1140 total_len, TIPC_ERR_NO_NODE);
1141 return res;
1145 * link_send_sections_long(): Entry for long messages where the
1146 * destination node is known and the header is complete,
1147 * including the total message length.
1148 * Link and bearer congestion status have been checked to be ok,
1149 * and are ignored if they change.
1151 * Note that fragments do not use the full link MTU so that they won't have
1152 * to undergo refragmentation if link changeover causes them to be sent
1153 * over another link with an additional tunnel header added as prefix.
1154 * (Refragmentation will still occur if the other link has a smaller MTU.)
1156 * Returns user data length or errno.
1158 static int link_send_sections_long(struct tipc_port *sender,
1159 struct iovec const *msg_sect,
1160 u32 num_sect,
1161 unsigned int total_len,
1162 u32 destaddr)
1164 struct tipc_link *l_ptr;
1165 struct tipc_node *node;
1166 struct tipc_msg *hdr = &sender->phdr;
1167 u32 dsz = total_len;
1168 u32 max_pkt, fragm_sz, rest;
1169 struct tipc_msg fragm_hdr;
1170 struct sk_buff *buf, *buf_chain, *prev;
1171 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1172 const unchar *sect_crs;
1173 int curr_sect;
1174 u32 fragm_no;
1176 again:
1177 fragm_no = 1;
1178 max_pkt = sender->max_pkt - INT_H_SIZE;
1179 /* leave room for tunnel header in case of link changeover */
1180 fragm_sz = max_pkt - INT_H_SIZE;
1181 /* leave room for fragmentation header in each fragment */
1182 rest = dsz;
1183 fragm_crs = 0;
1184 fragm_rest = 0;
1185 sect_rest = 0;
1186 sect_crs = NULL;
1187 curr_sect = -1;
1189 /* Prepare reusable fragment header */
1190 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1191 INT_H_SIZE, msg_destnode(hdr));
1192 msg_set_size(&fragm_hdr, max_pkt);
1193 msg_set_fragm_no(&fragm_hdr, 1);
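/*
 * The same header template is rewritten for each later fragment inside the
 * loop below: type (FRAGMENT/LAST_FRAGMENT), size and fragment number are
 * updated per fragment.
 */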
1195 /* Prepare header of first fragment */
1196 buf_chain = buf = tipc_buf_acquire(max_pkt);
1197 if (!buf)
1198 return -ENOMEM;
1199 buf->next = NULL;
1200 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1201 hsz = msg_hdr_sz(hdr);
1202 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1204 /* Chop up message */
1205 fragm_crs = INT_H_SIZE + hsz;
1206 fragm_rest = fragm_sz - hsz;
1208 do { /* For all sections */
1209 u32 sz;
1211 if (!sect_rest) {
1212 sect_rest = msg_sect[++curr_sect].iov_len;
1213 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1216 if (sect_rest < fragm_rest)
1217 sz = sect_rest;
1218 else
1219 sz = fragm_rest;
1221 if (likely(!sender->user_port)) {
1222 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1223 error:
1224 for (; buf_chain; buf_chain = buf) {
1225 buf = buf_chain->next;
1226 kfree_skb(buf_chain);
1228 return -EFAULT;
1230 } else
1231 skb_copy_to_linear_data_offset(buf, fragm_crs,
1232 sect_crs, sz);
1233 sect_crs += sz;
1234 sect_rest -= sz;
1235 fragm_crs += sz;
1236 fragm_rest -= sz;
1237 rest -= sz;
1239 if (!fragm_rest && rest) {
1241 /* Initiate new fragment: */
1242 if (rest <= fragm_sz) {
1243 fragm_sz = rest;
1244 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1245 } else {
1246 msg_set_type(&fragm_hdr, FRAGMENT);
1248 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1249 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1250 prev = buf;
1251 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1252 if (!buf)
1253 goto error;
1255 buf->next = NULL;
1256 prev->next = buf;
1257 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1258 fragm_crs = INT_H_SIZE;
1259 fragm_rest = fragm_sz;
1261 } while (rest > 0);
1264 * Now we have a buffer chain. Select a link and check
1265 * that packet size is still OK
1267 node = tipc_node_find(destaddr);
1268 if (likely(node)) {
1269 tipc_node_lock(node);
1270 l_ptr = node->active_links[sender->ref & 1];
1271 if (!l_ptr) {
1272 tipc_node_unlock(node);
1273 goto reject;
1275 if (l_ptr->max_pkt < max_pkt) {
1276 sender->max_pkt = l_ptr->max_pkt;
1277 tipc_node_unlock(node);
1278 for (; buf_chain; buf_chain = buf) {
1279 buf = buf_chain->next;
1280 kfree_skb(buf_chain);
1282 goto again;
1284 } else {
1285 reject:
1286 for (; buf_chain; buf_chain = buf) {
1287 buf = buf_chain->next;
1288 kfree_skb(buf_chain);
1290 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1291 total_len, TIPC_ERR_NO_NODE);
1294 /* Append chain of fragments to send queue & send them */
1295 l_ptr->long_msg_seq_no++;
1296 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1297 l_ptr->stats.sent_fragments += fragm_no;
1298 l_ptr->stats.sent_fragmented++;
1299 tipc_link_push_queue(l_ptr);
1300 tipc_node_unlock(node);
1301 return dsz;
1305 * tipc_link_push_packet: Push one unsent packet to the media
1307 u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1309 struct sk_buff *buf = l_ptr->first_out;
1310 u32 r_q_size = l_ptr->retransm_queue_size;
1311 u32 r_q_head = l_ptr->retransm_queue_head;
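/*
 * Return convention: 0 means one packet was handed to the bearer and the
 * caller may keep pushing, PUSH_FAILED means the bearer is congested, and
 * PUSH_FINISHED means there is nothing more that can be sent right now.
 */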
1313 /* Step to position where retransmission failed, if any, */
1314 /* consider that buffers may have been released in meantime */
1315 if (r_q_size && buf) {
1316 u32 last = lesser(mod(r_q_head + r_q_size),
1317 link_last_sent(l_ptr));
1318 u32 first = buf_seqno(buf);
1320 while (buf && less(first, r_q_head)) {
1321 first = mod(first + 1);
1322 buf = buf->next;
1324 l_ptr->retransm_queue_head = r_q_head = first;
1325 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1328 /* Continue retransmission now, if there is anything: */
1329 if (r_q_size && buf) {
1330 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1331 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1332 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1333 l_ptr->retransm_queue_head = mod(++r_q_head);
1334 l_ptr->retransm_queue_size = --r_q_size;
1335 l_ptr->stats.retransmitted++;
1336 return 0;
1337 } else {
1338 l_ptr->stats.bearer_congs++;
1339 return PUSH_FAILED;
1343 /* Send deferred protocol message, if any: */
1344 buf = l_ptr->proto_msg_queue;
1345 if (buf) {
1346 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1347 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1348 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1349 l_ptr->unacked_window = 0;
1350 kfree_skb(buf);
1351 l_ptr->proto_msg_queue = NULL;
1352 return 0;
1353 } else {
1354 l_ptr->stats.bearer_congs++;
1355 return PUSH_FAILED;
1359 /* Send one deferred data message, if send window not full: */
1360 buf = l_ptr->next_out;
1361 if (buf) {
1362 struct tipc_msg *msg = buf_msg(buf);
1363 u32 next = msg_seqno(msg);
1364 u32 first = buf_seqno(l_ptr->first_out);
1366 if (mod(next - first) < l_ptr->queue_limit[0]) {
1367 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1368 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1369 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1370 if (msg_user(msg) == MSG_BUNDLER)
1371 msg_set_type(msg, CLOSED_MSG);
1372 l_ptr->next_out = buf->next;
1373 return 0;
1374 } else {
1375 l_ptr->stats.bearer_congs++;
1376 return PUSH_FAILED;
1380 return PUSH_FINISHED;
1384 * tipc_link_push_queue(): push out the unsent messages of a link where
1385 * congestion has abated. Node is locked
1387 void tipc_link_push_queue(struct tipc_link *l_ptr)
1389 u32 res;
1391 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1392 return;
1394 do {
1395 res = tipc_link_push_packet(l_ptr);
1396 } while (!res);
1398 if (res == PUSH_FAILED)
1399 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1402 static void link_reset_all(unsigned long addr)
1404 struct tipc_node *n_ptr;
1405 char addr_string[16];
1406 u32 i;
1408 read_lock_bh(&tipc_net_lock);
1409 n_ptr = tipc_node_find((u32)addr);
1410 if (!n_ptr) {
1411 read_unlock_bh(&tipc_net_lock);
1412 return; /* node no longer exists */
1415 tipc_node_lock(n_ptr);
1417 pr_warn("Resetting all links to %s\n",
1418 tipc_addr_string_fill(addr_string, n_ptr->addr));
1420 for (i = 0; i < MAX_BEARERS; i++) {
1421 if (n_ptr->links[i]) {
1422 link_print(n_ptr->links[i], "Resetting link\n");
1423 tipc_link_reset(n_ptr->links[i]);
1427 tipc_node_unlock(n_ptr);
1428 read_unlock_bh(&tipc_net_lock);
1431 static void link_retransmit_failure(struct tipc_link *l_ptr,
1432 struct sk_buff *buf)
1434 struct tipc_msg *msg = buf_msg(buf);
1436 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
1438 if (l_ptr->addr) {
1439 /* Handle failure on standard link */
1440 link_print(l_ptr, "Resetting link\n");
1441 tipc_link_reset(l_ptr);
1443 } else {
1444 /* Handle failure on broadcast link */
1445 struct tipc_node *n_ptr;
1446 char addr_string[16];
1448 pr_info("Msg seq number: %u, ", msg_seqno(msg));
1449 pr_cont("Outstanding acks: %lu\n",
1450 (unsigned long) TIPC_SKB_CB(buf)->handle);
1452 n_ptr = tipc_bclink_retransmit_to();
1453 tipc_node_lock(n_ptr);
1455 tipc_addr_string_fill(addr_string, n_ptr->addr);
1456 pr_info("Broadcast link info for %s\n", addr_string);
1457 pr_info("Supportable: %d, Supported: %d, Acked: %u\n",
1458 n_ptr->bclink.supportable,
1459 n_ptr->bclink.supported,
1460 n_ptr->bclink.acked);
1461 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
1462 n_ptr->bclink.last_in,
1463 n_ptr->bclink.oos_state,
1464 n_ptr->bclink.last_sent);
1466 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1468 tipc_node_unlock(n_ptr);
1470 l_ptr->stale_count = 0;
1474 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1475 u32 retransmits)
1477 struct tipc_msg *msg;
1479 if (!buf)
1480 return;
1482 msg = buf_msg(buf);
1484 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1485 if (l_ptr->retransm_queue_size == 0) {
1486 l_ptr->retransm_queue_head = msg_seqno(msg);
1487 l_ptr->retransm_queue_size = retransmits;
1488 } else {
1489 pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
1490 l_ptr->name, l_ptr->retransm_queue_size);
1492 return;
1493 } else {
1494 /* Detect repeated retransmit failures on uncongested bearer */
1495 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1496 if (++l_ptr->stale_count > 100) {
1497 link_retransmit_failure(l_ptr, buf);
1498 return;
1500 } else {
1501 l_ptr->last_retransmitted = msg_seqno(msg);
1502 l_ptr->stale_count = 1;
1506 while (retransmits && (buf != l_ptr->next_out) && buf) {
1507 msg = buf_msg(buf);
1508 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1509 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1510 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1511 buf = buf->next;
1512 retransmits--;
1513 l_ptr->stats.retransmitted++;
1514 } else {
1515 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1516 l_ptr->stats.bearer_congs++;
1517 l_ptr->retransm_queue_head = buf_seqno(buf);
1518 l_ptr->retransm_queue_size = retransmits;
1519 return;
1523 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1527 * link_insert_deferred_queue - insert deferred messages back into receive chain
1529 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1530 struct sk_buff *buf)
1532 u32 seq_no;
1534 if (l_ptr->oldest_deferred_in == NULL)
1535 return buf;
1537 seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1538 if (seq_no == mod(l_ptr->next_in_no)) {
1539 l_ptr->newest_deferred_in->next = buf;
1540 buf = l_ptr->oldest_deferred_in;
1541 l_ptr->oldest_deferred_in = NULL;
1542 l_ptr->deferred_inqueue_sz = 0;
1544 return buf;
1548 * link_recv_buf_validate - validate basic format of received message
1550 * This routine ensures a TIPC message has an acceptable header, and at least
1551 * as much data as the header indicates it should. The routine also ensures
1552 * that the entire message header is stored in the main fragment of the message
1553 * buffer, to simplify future access to message header fields.
1555 * Note: Having extra info present in the message header or data areas is OK.
1556 * TIPC will ignore the excess, under the assumption that it is optional info
1557 * introduced by a later release of the protocol.
1559 static int link_recv_buf_validate(struct sk_buff *buf)
1561 static u32 min_data_hdr_size[8] = {
1562 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1563 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1566 struct tipc_msg *msg;
1567 u32 tipc_hdr[2];
1568 u32 size;
1569 u32 hdr_size;
1570 u32 min_hdr_size;
1572 if (unlikely(buf->len < MIN_H_SIZE))
1573 return 0;
1575 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1576 if (msg == NULL)
1577 return 0;
1579 if (unlikely(msg_version(msg) != TIPC_VERSION))
1580 return 0;
1582 size = msg_size(msg);
1583 hdr_size = msg_hdr_sz(msg);
1584 min_hdr_size = msg_isdata(msg) ?
1585 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1587 if (unlikely((hdr_size < min_hdr_size) ||
1588 (size < hdr_size) ||
1589 (buf->len < size) ||
1590 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1591 return 0;
1593 return pskb_may_pull(buf, hdr_size);
1597 * tipc_recv_msg - process TIPC messages arriving from off-node
1598 * @head: pointer to message buffer chain
1599 * @tb_ptr: pointer to bearer message arrived on
1601 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1602 * structure (i.e. cannot be NULL), but bearer can be inactive.
1604 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1606 read_lock_bh(&tipc_net_lock);
1607 while (head) {
1608 struct tipc_node *n_ptr;
1609 struct tipc_link *l_ptr;
1610 struct sk_buff *crs;
1611 struct sk_buff *buf = head;
1612 struct tipc_msg *msg;
1613 u32 seq_no;
1614 u32 ackd;
1615 u32 released = 0;
1616 int type;
1618 head = head->next;
1620 /* Ensure bearer is still enabled */
1621 if (unlikely(!b_ptr->active))
1622 goto cont;
1624 /* Ensure message is well-formed */
1625 if (unlikely(!link_recv_buf_validate(buf)))
1626 goto cont;
1628 /* Ensure message data is a single contiguous unit */
1629 if (unlikely(skb_linearize(buf)))
1630 goto cont;
1632 /* Handle arrival of a non-unicast link message */
1633 msg = buf_msg(buf);
1635 if (unlikely(msg_non_seq(msg))) {
1636 if (msg_user(msg) == LINK_CONFIG)
1637 tipc_disc_recv_msg(buf, b_ptr);
1638 else
1639 tipc_bclink_recv_pkt(buf);
1640 continue;
1643 /* Discard unicast link messages destined for another node */
1644 if (unlikely(!msg_short(msg) &&
1645 (msg_destnode(msg) != tipc_own_addr)))
1646 goto cont;
1648 /* Locate neighboring node that sent message */
1649 n_ptr = tipc_node_find(msg_prevnode(msg));
1650 if (unlikely(!n_ptr))
1651 goto cont;
1652 tipc_node_lock(n_ptr);
1654 /* Locate unicast link endpoint that should handle message */
1655 l_ptr = n_ptr->links[b_ptr->identity];
1656 if (unlikely(!l_ptr)) {
1657 tipc_node_unlock(n_ptr);
1658 goto cont;
1661 /* Verify that communication with node is currently allowed */
1662 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1663 msg_user(msg) == LINK_PROTOCOL &&
1664 (msg_type(msg) == RESET_MSG ||
1665 msg_type(msg) == ACTIVATE_MSG) &&
1666 !msg_redundant_link(msg))
1667 n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1669 if (n_ptr->block_setup) {
1670 tipc_node_unlock(n_ptr);
1671 goto cont;
1674 /* Validate message sequence number info */
1675 seq_no = msg_seqno(msg);
1676 ackd = msg_ack(msg);
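/*
 * Every arriving packet piggy-backs the peer's latest acknowledgement;
 * 'ackd' is used below to release packets from our send queue that the
 * peer has confirmed receiving.
 */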
1678 /* Release acked messages */
1679 if (n_ptr->bclink.supported)
1680 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1682 crs = l_ptr->first_out;
1683 while ((crs != l_ptr->next_out) &&
1684 less_eq(buf_seqno(crs), ackd)) {
1685 struct sk_buff *next = crs->next;
1687 kfree_skb(crs);
1688 crs = next;
1689 released++;
1691 if (released) {
1692 l_ptr->first_out = crs;
1693 l_ptr->out_queue_size -= released;
1696 /* Try sending any messages link endpoint has pending */
1697 if (unlikely(l_ptr->next_out))
1698 tipc_link_push_queue(l_ptr);
1699 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1700 tipc_link_wakeup_ports(l_ptr, 0);
1701 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1702 l_ptr->stats.sent_acks++;
1703 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1706 /* Now (finally!) process the incoming message */
1707 protocol_check:
1708 if (likely(link_working_working(l_ptr))) {
1709 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1710 l_ptr->next_in_no++;
1711 if (unlikely(l_ptr->oldest_deferred_in))
1712 head = link_insert_deferred_queue(l_ptr,
1713 head);
1714 deliver:
1715 if (likely(msg_isdata(msg))) {
1716 tipc_node_unlock(n_ptr);
1717 tipc_port_recv_msg(buf);
1718 continue;
1720 switch (msg_user(msg)) {
1721 int ret;
1722 case MSG_BUNDLER:
1723 l_ptr->stats.recv_bundles++;
1724 l_ptr->stats.recv_bundled +=
1725 msg_msgcnt(msg);
1726 tipc_node_unlock(n_ptr);
1727 tipc_link_recv_bundle(buf);
1728 continue;
1729 case NAME_DISTRIBUTOR:
1730 tipc_node_unlock(n_ptr);
1731 tipc_named_recv(buf);
1732 continue;
1733 case CONN_MANAGER:
1734 tipc_node_unlock(n_ptr);
1735 tipc_port_recv_proto_msg(buf);
1736 continue;
1737 case MSG_FRAGMENTER:
1738 l_ptr->stats.recv_fragments++;
1739 ret = tipc_link_recv_fragment(
1740 &l_ptr->defragm_buf,
1741 &buf, &msg);
1742 if (ret == 1) {
1743 l_ptr->stats.recv_fragmented++;
1744 goto deliver;
1746 if (ret == -1)
1747 l_ptr->next_in_no--;
1748 break;
1749 case CHANGEOVER_PROTOCOL:
1750 type = msg_type(msg);
1751 if (link_recv_changeover_msg(&l_ptr,
1752 &buf)) {
1753 msg = buf_msg(buf);
1754 seq_no = msg_seqno(msg);
1755 if (type == ORIGINAL_MSG)
1756 goto deliver;
1757 goto protocol_check;
1759 break;
1760 default:
1761 kfree_skb(buf);
1762 buf = NULL;
1763 break;
1765 tipc_node_unlock(n_ptr);
1766 tipc_net_route_msg(buf);
1767 continue;
1769 link_handle_out_of_seq_msg(l_ptr, buf);
1770 head = link_insert_deferred_queue(l_ptr, head);
1771 tipc_node_unlock(n_ptr);
1772 continue;
1775 if (msg_user(msg) == LINK_PROTOCOL) {
1776 link_recv_proto_msg(l_ptr, buf);
1777 head = link_insert_deferred_queue(l_ptr, head);
1778 tipc_node_unlock(n_ptr);
1779 continue;
1781 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1783 if (link_working_working(l_ptr)) {
1784 /* Re-insert in front of queue */
1785 buf->next = head;
1786 head = buf;
1787 tipc_node_unlock(n_ptr);
1788 continue;
1790 tipc_node_unlock(n_ptr);
1791 cont:
1792 kfree_skb(buf);
1794 read_unlock_bh(&tipc_net_lock);
1798 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1800 * Returns increase in queue length (i.e. 0 or 1)
1802 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1803 struct sk_buff *buf)
1805 struct sk_buff *queue_buf;
1806 struct sk_buff **prev;
1807 u32 seq_no = buf_seqno(buf);
1809 buf->next = NULL;
1811 /* Empty queue ? */
1812 if (*head == NULL) {
1813 *head = *tail = buf;
1814 return 1;
1817 /* Last ? */
1818 if (less(buf_seqno(*tail), seq_no)) {
1819 (*tail)->next = buf;
1820 *tail = buf;
1821 return 1;
1824 /* Locate insertion point in queue, then insert; discard if duplicate */
1825 prev = head;
1826 queue_buf = *head;
1827 for (;;) {
1828 u32 curr_seqno = buf_seqno(queue_buf);
1830 if (seq_no == curr_seqno) {
1831 kfree_skb(buf);
1832 return 0;
1835 if (less(seq_no, curr_seqno))
1836 break;
1838 prev = &queue_buf->next;
1839 queue_buf = queue_buf->next;
1842 buf->next = queue_buf;
1843 *prev = buf;
1844 return 1;
1848 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1850 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1851 struct sk_buff *buf)
1853 u32 seq_no = buf_seqno(buf);
1855 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1856 link_recv_proto_msg(l_ptr, buf);
1857 return;
1860 /* Record OOS packet arrival (force mismatch on next timeout) */
1861 l_ptr->checkpoint--;
1864 * Discard packet if a duplicate; otherwise add it to deferred queue
1865 * and notify peer of gap as per protocol specification
1867 if (less(seq_no, mod(l_ptr->next_in_no))) {
1868 l_ptr->stats.duplicates++;
1869 kfree_skb(buf);
1870 return;
1873 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1874 &l_ptr->newest_deferred_in, buf)) {
1875 l_ptr->deferred_inqueue_sz++;
1876 l_ptr->stats.deferred_recv++;
1877 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1878 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1879 } else
1880 l_ptr->stats.duplicates++;
1884 * Send protocol message to the other endpoint.
1886 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1887 int probe_msg, u32 gap, u32 tolerance,
1888 u32 priority, u32 ack_mtu)
1890 struct sk_buff *buf = NULL;
1891 struct tipc_msg *msg = l_ptr->pmsg;
1892 u32 msg_size = sizeof(l_ptr->proto_msg);
1893 int r_flag;
1895 /* Discard any previous message that was deferred due to congestion */
1896 if (l_ptr->proto_msg_queue) {
1897 kfree_skb(l_ptr->proto_msg_queue);
1898 l_ptr->proto_msg_queue = NULL;
1901 if (link_blocked(l_ptr))
1902 return;
1904 /* Abort non-RESET send if communication with node is prohibited */
1905 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1906 return;
1908 /* Create protocol message with "out-of-sequence" sequence number */
1909 msg_set_type(msg, msg_typ);
1910 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1911 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1912 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1914 if (msg_typ == STATE_MSG) {
1915 u32 next_sent = mod(l_ptr->next_out_no);
1917 if (!tipc_link_is_up(l_ptr))
1918 return;
1919 if (l_ptr->next_out)
1920 next_sent = buf_seqno(l_ptr->next_out);
1921 msg_set_next_sent(msg, next_sent);
1922 if (l_ptr->oldest_deferred_in) {
1923 u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1924 gap = mod(rec - mod(l_ptr->next_in_no));
1926 msg_set_seq_gap(msg, gap);
1927 if (gap)
1928 l_ptr->stats.sent_nacks++;
1929 msg_set_link_tolerance(msg, tolerance);
1930 msg_set_linkprio(msg, priority);
1931 msg_set_max_pkt(msg, ack_mtu);
1932 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1933 msg_set_probe(msg, probe_msg != 0);
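/*
 * MTU discovery: probe messages are padded to a size roughly halfway between
 * the confirmed max_pkt and max_pkt_target (rounded to a 4-byte multiple).
 * If ten probes in a row fail to raise the confirmed size, the target is
 * lowered to just under the probe size and probing starts over from there.
 */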
1934 if (probe_msg) {
1935 u32 mtu = l_ptr->max_pkt;
1937 if ((mtu < l_ptr->max_pkt_target) &&
1938 link_working_working(l_ptr) &&
1939 l_ptr->fsm_msg_cnt) {
1940 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1941 if (l_ptr->max_pkt_probes == 10) {
1942 l_ptr->max_pkt_target = (msg_size - 4);
1943 l_ptr->max_pkt_probes = 0;
1944 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1946 l_ptr->max_pkt_probes++;
1949 l_ptr->stats.sent_probes++;
1951 l_ptr->stats.sent_states++;
1952 } else { /* RESET_MSG or ACTIVATE_MSG */
1953 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1954 msg_set_seq_gap(msg, 0);
1955 msg_set_next_sent(msg, 1);
1956 msg_set_probe(msg, 0);
1957 msg_set_link_tolerance(msg, l_ptr->tolerance);
1958 msg_set_linkprio(msg, l_ptr->priority);
1959 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1962 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1963 msg_set_redundant_link(msg, r_flag);
1964 msg_set_linkprio(msg, l_ptr->priority);
1965 msg_set_size(msg, msg_size);
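/*
 * Protocol messages do not consume link sequence numbers; the header is
 * stamped with a value offset by half the sequence space from the data
 * stream, i.e. deliberately out of sequence.
 */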
1967 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1969 buf = tipc_buf_acquire(msg_size);
1970 if (!buf)
1971 return;
1973 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1975 /* Defer message if bearer is already congested */
1976 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1977 l_ptr->proto_msg_queue = buf;
1978 return;
1981 /* Defer message if attempting to send results in bearer congestion */
1982 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1983 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1984 l_ptr->proto_msg_queue = buf;
1985 l_ptr->stats.bearer_congs++;
1986 return;
1989 /* Discard message if it was sent successfully */
1990 l_ptr->unacked_window = 0;
1991 kfree_skb(buf);
1995 * Receive protocol message :
1996 * Note that network plane id propagates through the network, and may
1997 * change at any time. The node with the lowest address takes precedence.
1999 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2001 u32 rec_gap = 0;
2002 u32 max_pkt_info;
2003 u32 max_pkt_ack;
2004 u32 msg_tol;
2005 struct tipc_msg *msg = buf_msg(buf);
2007 if (link_blocked(l_ptr))
2008 goto exit;
2010 /* record unnumbered packet arrival (force mismatch on next timeout) */
2011 l_ptr->checkpoint--;
2013 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2014 if (tipc_own_addr > msg_prevnode(msg))
2015 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2017 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2019 switch (msg_type(msg)) {
2021 case RESET_MSG:
2022 if (!link_working_unknown(l_ptr) &&
2023 (l_ptr->peer_session != INVALID_SESSION)) {
2024 if (less_eq(msg_session(msg), l_ptr->peer_session))
2025 break; /* duplicate or old reset: ignore */
2028 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2029 link_working_unknown(l_ptr))) {
2030 /*
2031 * peer has lost contact -- don't allow peer's links
2032 * to reactivate before we recognize loss & clean up
2033 */
2034 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2037 link_state_event(l_ptr, RESET_MSG);
2039 /* fall thru' */
2040 case ACTIVATE_MSG:
2041 /* Update link settings according to the other endpoint's values */
2042 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2044 msg_tol = msg_link_tolerance(msg);
2045 if (msg_tol > l_ptr->tolerance)
2046 link_set_supervision_props(l_ptr, msg_tol);
2048 if (msg_linkprio(msg) > l_ptr->priority)
2049 l_ptr->priority = msg_linkprio(msg);
2051 max_pkt_info = msg_max_pkt(msg);
2052 if (max_pkt_info) {
2053 if (max_pkt_info < l_ptr->max_pkt_target)
2054 l_ptr->max_pkt_target = max_pkt_info;
2055 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2056 l_ptr->max_pkt = l_ptr->max_pkt_target;
2057 } else {
2058 l_ptr->max_pkt = l_ptr->max_pkt_target;
2060 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2062 /* Synchronize broadcast link info, if not done previously */
2063 if (!tipc_node_is_up(l_ptr->owner)) {
2064 l_ptr->owner->bclink.last_sent =
2065 l_ptr->owner->bclink.last_in =
2066 msg_last_bcast(msg);
2067 l_ptr->owner->bclink.oos_state = 0;
2070 l_ptr->peer_session = msg_session(msg);
2071 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2073 if (msg_type(msg) == ACTIVATE_MSG)
2074 link_state_event(l_ptr, ACTIVATE_MSG);
2075 break;
2076 case STATE_MSG:
2078 msg_tol = msg_link_tolerance(msg);
2079 if (msg_tol)
2080 link_set_supervision_props(l_ptr, msg_tol);
2082 if (msg_linkprio(msg) &&
2083 (msg_linkprio(msg) != l_ptr->priority)) {
2084 pr_warn("%s<%s>, priority change %u->%u\n",
2085 link_rst_msg, l_ptr->name, l_ptr->priority,
2086 msg_linkprio(msg));
2087 l_ptr->priority = msg_linkprio(msg);
2088 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2089 break;
2091 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2092 l_ptr->stats.recv_states++;
2093 if (link_reset_unknown(l_ptr))
2094 break;
2096 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2097 rec_gap = mod(msg_next_sent(msg) -
2098 mod(l_ptr->next_in_no));
2101 max_pkt_ack = msg_max_pkt(msg);
2102 if (max_pkt_ack > l_ptr->max_pkt) {
2103 l_ptr->max_pkt = max_pkt_ack;
2104 l_ptr->max_pkt_probes = 0;
2107 max_pkt_ack = 0;
2108 if (msg_probe(msg)) {
2109 l_ptr->stats.recv_probes++;
2110 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2111 max_pkt_ack = msg_size(msg);
2114 /* Protocol message before retransmits, reduce loss risk */
2115 if (l_ptr->owner->bclink.supported)
2116 tipc_bclink_update_link_state(l_ptr->owner,
2117 msg_last_bcast(msg));
2119 if (rec_gap || (msg_probe(msg))) {
2120 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2121 0, rec_gap, 0, 0, max_pkt_ack);
2123 if (msg_seq_gap(msg)) {
2124 l_ptr->stats.recv_nacks++;
2125 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2126 msg_seq_gap(msg));
2128 break;
2130 exit:
2131 kfree_skb(buf);
2136 * tipc_link_tunnel(): Send one message via a link belonging to
2137 * another bearer. Owner node is locked.
2139 static void tipc_link_tunnel(struct tipc_link *l_ptr,
2140 struct tipc_msg *tunnel_hdr,
2141 struct tipc_msg *msg,
2142 u32 selector)
2144 struct tipc_link *tunnel;
2145 struct sk_buff *buf;
2146 u32 length = msg_size(msg);
2148 tunnel = l_ptr->owner->active_links[selector & 1];
2149 if (!tipc_link_is_up(tunnel)) {
2150 pr_warn("%stunnel link no longer available\n", link_co_err);
2151 return;
2153 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2154 buf = tipc_buf_acquire(length + INT_H_SIZE);
2155 if (!buf) {
2156 pr_warn("%sunable to send tunnel msg\n", link_co_err);
2157 return;
2159 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2160 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2161 tipc_link_send_buf(tunnel, buf);
2162 }
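/*
 * Illustrative layout of the tunnel buffer built above (a sketch, not a
 * protocol definition): a CHANGEOVER_PROTOCOL header of INT_H_SIZE bytes
 * is followed by the complete original message, so both the buffer and the
 * size recorded in the tunnel header are INT_H_SIZE + msg_size(msg):
 *
 *     0            INT_H_SIZE                    INT_H_SIZE + msg_size(msg)
 *     +------------+----------------------------------+
 *     | tunnel_hdr | original message (header + data) |
 *     +------------+----------------------------------+
 */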
2166 /*
2167 * tipc_link_changeover(): Send whole message queue via the remaining link.
2168 * Owner node is locked.
2169 */
2170 void tipc_link_changeover(struct tipc_link *l_ptr)
2172 u32 msgcount = l_ptr->out_queue_size;
2173 struct sk_buff *crs = l_ptr->first_out;
2174 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
2175 struct tipc_msg tunnel_hdr;
2176 int split_bundles;
2178 if (!tunnel)
2179 return;
2181 if (!l_ptr->owner->permit_changeover) {
2182 pr_warn("%speer did not permit changeover\n", link_co_err);
2183 return;
2186 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2187 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2188 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2189 msg_set_msgcnt(&tunnel_hdr, msgcount);
2191 if (!l_ptr->first_out) {
2192 struct sk_buff *buf;
2194 buf = tipc_buf_acquire(INT_H_SIZE);
2195 if (buf) {
2196 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2197 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2198 tipc_link_send_buf(tunnel, buf);
2199 } else {
2200 pr_warn("%sunable to send changeover msg\n",
2201 link_co_err);
2203 return;
2206 split_bundles = (l_ptr->owner->active_links[0] !=
2207 l_ptr->owner->active_links[1]);
2209 while (crs) {
2210 struct tipc_msg *msg = buf_msg(crs);
2212 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2213 struct tipc_msg *m = msg_get_wrapped(msg);
2214 unchar *pos = (unchar *)m;
2216 msgcount = msg_msgcnt(msg);
2217 while (msgcount--) {
2218 msg_set_seqno(m, msg_seqno(msg));
2219 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2220 msg_link_selector(m));
2221 pos += align(msg_size(m));
2222 m = (struct tipc_msg *)pos;
2224 } else {
2225 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2226 msg_link_selector(msg));
2228 crs = crs->next;
2229 }
2230 }
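/*
 * tipc_link_send_duplicate(): Tunnel a copy of every message in l_ptr's
 * send queue via 'tunnel', each wrapped in a DUPLICATE_MSG changeover
 * header that carries the peer bearer id and the queue size (msgcnt).
 */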
2232 void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2234 struct sk_buff *iter;
2235 struct tipc_msg tunnel_hdr;
2237 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2238 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2239 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2240 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2241 iter = l_ptr->first_out;
2242 while (iter) {
2243 struct sk_buff *outbuf;
2244 struct tipc_msg *msg = buf_msg(iter);
2245 u32 length = msg_size(msg);
2247 if (msg_user(msg) == MSG_BUNDLER)
2248 msg_set_type(msg, CLOSED_MSG);
2249 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2250 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2251 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2252 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2253 if (outbuf == NULL) {
2254 pr_warn("%sunable to send duplicate msg\n",
2255 link_co_err);
2256 return;
2258 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2259 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2260 length);
2261 tipc_link_send_buf(tunnel, outbuf);
2262 if (!tipc_link_is_up(l_ptr))
2263 return;
2264 iter = iter->next;
2269 * buf_extract - extracts embedded TIPC message from another message
2270 * @skb: encapsulating message buffer
2271 * @from_pos: offset to extract from
2273 * Returns a new message buffer containing an embedded message. The
2274 * encapsulating message itself is left unchanged.
2276 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2278 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2279 u32 size = msg_size(msg);
2280 struct sk_buff *eb;
2282 eb = tipc_buf_acquire(size);
2283 if (eb)
2284 skb_copy_to_linear_data(eb, msg, size);
2285 return eb;
2289 * link_recv_changeover_msg(): Receive tunneled packet sent
2290 * via other link. Node is locked. Return extracted buffer.
2292 static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2293 struct sk_buff **buf)
2295 struct sk_buff *tunnel_buf = *buf;
2296 struct tipc_link *dest_link;
2297 struct tipc_msg *msg;
2298 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2299 u32 msg_typ = msg_type(tunnel_msg);
2300 u32 msg_count = msg_msgcnt(tunnel_msg);
2302 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2303 if (!dest_link)
2304 goto exit;
2305 if (dest_link == *l_ptr) {
2306 pr_err("Unexpected changeover message on link <%s>\n",
2307 (*l_ptr)->name);
2308 goto exit;
2310 *l_ptr = dest_link;
2311 msg = msg_get_wrapped(tunnel_msg);
2313 if (msg_typ == DUPLICATE_MSG) {
2314 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2315 goto exit;
2316 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2317 if (*buf == NULL) {
2318 pr_warn("%sduplicate msg dropped\n", link_co_err);
2319 goto exit;
2321 kfree_skb(tunnel_buf);
2322 return 1;
2325 /* First original message? */
2326 if (tipc_link_is_up(dest_link)) {
2327 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
2328 dest_link->name);
2329 tipc_link_reset(dest_link);
2330 dest_link->exp_msg_count = msg_count;
2331 if (!msg_count)
2332 goto exit;
2333 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2334 dest_link->exp_msg_count = msg_count;
2335 if (!msg_count)
2336 goto exit;
2339 /* Receive original message */
2340 if (dest_link->exp_msg_count == 0) {
2341 pr_warn("%sgot too many tunnelled messages\n", link_co_err);
2342 goto exit;
2344 dest_link->exp_msg_count--;
2345 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2346 goto exit;
2347 } else {
2348 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2349 if (*buf != NULL) {
2350 kfree_skb(tunnel_buf);
2351 return 1;
2352 } else {
2353 pr_warn("%soriginal msg dropped\n", link_co_err);
2356 exit:
2357 *buf = NULL;
2358 kfree_skb(tunnel_buf);
2359 return 0;
2363 * Bundler functionality:
2365 void tipc_link_recv_bundle(struct sk_buff *buf)
2367 u32 msgcount = msg_msgcnt(buf_msg(buf));
2368 u32 pos = INT_H_SIZE;
2369 struct sk_buff *obuf;
2371 while (msgcount--) {
2372 obuf = buf_extract(buf, pos);
2373 if (obuf == NULL) {
2374 pr_warn("Link unable to unbundle message(s)\n");
2375 break;
2377 pos += align(msg_size(buf_msg(obuf)));
2378 tipc_net_route_msg(obuf);
2380 kfree_skb(buf);
2381 }
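/*
 * Bundle layout assumed by the unbundling loop above (illustrative sketch):
 * a bundle header of INT_H_SIZE bytes followed by the bundled messages at
 * 4-byte aligned offsets, i.e.
 *
 *     pos(0) = INT_H_SIZE
 *     pos(n) = pos(n - 1) + align(msg_size(message n - 1))
 *
 * which is exactly how 'pos' advances in tipc_link_recv_bundle().
 */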
2384 * Fragmentation/defragmentation:
2388 * link_send_long_buf: Entry for buffers needing fragmentation.
2389 * The buffer is complete, including the total message length.
2390 * Returns user data length.
2392 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2394 struct sk_buff *buf_chain = NULL;
2395 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2396 struct tipc_msg *inmsg = buf_msg(buf);
2397 struct tipc_msg fragm_hdr;
2398 u32 insize = msg_size(inmsg);
2399 u32 dsz = msg_data_sz(inmsg);
2400 unchar *crs = buf->data;
2401 u32 rest = insize;
2402 u32 pack_sz = l_ptr->max_pkt;
2403 u32 fragm_sz = pack_sz - INT_H_SIZE;
2404 u32 fragm_no = 0;
2405 u32 destaddr;
2407 if (msg_short(inmsg))
2408 destaddr = l_ptr->addr;
2409 else
2410 destaddr = msg_destnode(inmsg);
2412 /* Prepare reusable fragment header: */
2413 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2414 INT_H_SIZE, destaddr);
2416 /* Chop up message: */
2417 while (rest > 0) {
2418 struct sk_buff *fragm;
2420 if (rest <= fragm_sz) {
2421 fragm_sz = rest;
2422 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2424 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2425 if (fragm == NULL) {
2426 kfree_skb(buf);
2427 while (buf_chain) {
2428 buf = buf_chain;
2429 buf_chain = buf_chain->next;
2430 kfree_skb(buf);
2432 return -ENOMEM;
2434 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2435 fragm_no++;
2436 msg_set_fragm_no(&fragm_hdr, fragm_no);
2437 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2438 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2439 fragm_sz);
2440 buf_chain_tail->next = fragm;
2441 buf_chain_tail = fragm;
2443 rest -= fragm_sz;
2444 crs += fragm_sz;
2445 msg_set_type(&fragm_hdr, FRAGMENT);
2447 kfree_skb(buf);
2449 /* Append chain of fragments to send queue & send them */
2450 l_ptr->long_msg_seq_no++;
2451 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2452 l_ptr->stats.sent_fragments += fragm_no;
2453 l_ptr->stats.sent_fragmented++;
2454 tipc_link_push_queue(l_ptr);
2456 return dsz;
2457 }
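/*
 * Illustrative sketch only (not used anywhere in TIPC): the number of
 * fragments link_send_long_buf() produces for a message of 'msg_size'
 * bytes over a link with MTU 'max_pkt', which matches the expected
 * fragment count derived by the receiver in tipc_link_recv_fragment().
 */
static inline u32 example_fragment_count(u32 msg_size, u32 max_pkt)
{
	u32 fragm_sz = max_pkt - INT_H_SIZE;	/* payload carried per fragment */

	return msg_size / fragm_sz + !!(msg_size % fragm_sz);
}

/*
 * For example, with max_pkt = 1500 and INT_H_SIZE at its usual 40 bytes,
 * example_fragment_count(10000, 1500) == 7: six full 1460-byte fragments
 * plus one 1240-byte LAST_FRAGMENT.
 */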
2459 /*
2460 * A pending message being re-assembled must store certain values
2461 * to handle subsequent fragments correctly. The following functions
2462 * help store these values in unused, available fields in the
2463 * pending message. This makes dynamic memory allocation unnecessary.
2464 */
2465 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2467 msg_set_seqno(buf_msg(buf), seqno);
2470 static u32 get_fragm_size(struct sk_buff *buf)
2472 return msg_ack(buf_msg(buf));
2475 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2477 msg_set_ack(buf_msg(buf), sz);
2480 static u32 get_expected_frags(struct sk_buff *buf)
2482 return msg_bcast_ack(buf_msg(buf));
2485 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2487 msg_set_bcast_ack(buf_msg(buf), exp);
2490 static u32 get_timer_cnt(struct sk_buff *buf)
2492 return msg_reroute_cnt(buf_msg(buf));
2495 static void incr_timer_cnt(struct sk_buff *buf)
2497 msg_incr_reroute_cnt(buf_msg(buf));
2498 }
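/*
 * Summary of the field reuse implemented by the helpers above (shown here
 * only as an illustrative overview):
 *
 *     header field   helper pair                                   stores
 *     ------------   -------------------------------------------   -------------------------
 *     seqno          set_long_msg_seqno()                          long message sequence no.
 *     ack            set_fragm_size() / get_fragm_size()           fragment size
 *     bcast_ack      set_expected_frags() / get_expected_frags()   fragments still expected
 *     reroute_cnt    incr_timer_cnt() / get_timer_cnt()            stale-fragment timer ticks
 *
 * These fields are unused while a buffer sits on the 'pending' reassembly
 * queue, so no separate tracking structure has to be allocated.
 */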
2501 * tipc_link_recv_fragment(): Called with node lock on. Returns
2502 * the reassembled buffer if message is complete.
2504 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2505 struct tipc_msg **m)
2507 struct sk_buff *prev = NULL;
2508 struct sk_buff *fbuf = *fb;
2509 struct tipc_msg *fragm = buf_msg(fbuf);
2510 struct sk_buff *pbuf = *pending;
2511 u32 long_msg_seq_no = msg_long_msgno(fragm);
2513 *fb = NULL;
2515 /* Is there an incomplete message waiting for this fragment? */
2516 while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2517 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2518 prev = pbuf;
2519 pbuf = pbuf->next;
2522 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2523 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2524 u32 msg_sz = msg_size(imsg);
2525 u32 fragm_sz = msg_data_sz(fragm);
2526 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2527 u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2528 if (msg_type(imsg) == TIPC_MCAST_MSG)
2529 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2530 if (msg_size(imsg) > max) {
2531 kfree_skb(fbuf);
2532 return 0;
2534 pbuf = tipc_buf_acquire(msg_size(imsg));
2535 if (pbuf != NULL) {
2536 pbuf->next = *pending;
2537 *pending = pbuf;
2538 skb_copy_to_linear_data(pbuf, imsg,
2539 msg_data_sz(fragm));
2540 /* Prepare buffer for subsequent fragments. */
2541 set_long_msg_seqno(pbuf, long_msg_seq_no);
2542 set_fragm_size(pbuf, fragm_sz);
2543 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2544 } else {
2545 pr_debug("Link unable to reassemble fragmented message\n");
2546 kfree_skb(fbuf);
2547 return -1;
2549 kfree_skb(fbuf);
2550 return 0;
2551 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2552 u32 dsz = msg_data_sz(fragm);
2553 u32 fsz = get_fragm_size(pbuf);
2554 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2555 u32 exp_frags = get_expected_frags(pbuf) - 1;
2556 skb_copy_to_linear_data_offset(pbuf, crs,
2557 msg_data(fragm), dsz);
2558 kfree_skb(fbuf);
2560 /* Is message complete? */
2561 if (exp_frags == 0) {
2562 if (prev)
2563 prev->next = pbuf->next;
2564 else
2565 *pending = pbuf->next;
2566 msg_reset_reroute_cnt(buf_msg(pbuf));
2567 *fb = pbuf;
2568 *m = buf_msg(pbuf);
2569 return 1;
2571 set_expected_frags(pbuf, exp_frags);
2572 return 0;
2574 kfree_skb(fbuf);
2575 return 0;
2579 * link_check_defragm_bufs - flush stale incoming message fragments
2580 * @l_ptr: pointer to link
2582 static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2584 struct sk_buff *prev = NULL;
2585 struct sk_buff *next = NULL;
2586 struct sk_buff *buf = l_ptr->defragm_buf;
2588 if (!buf)
2589 return;
2590 if (!link_working_working(l_ptr))
2591 return;
2592 while (buf) {
2593 u32 cnt = get_timer_cnt(buf);
2595 next = buf->next;
2596 if (cnt < 4) {
2597 incr_timer_cnt(buf);
2598 prev = buf;
2599 } else {
2600 if (prev)
2601 prev->next = buf->next;
2602 else
2603 l_ptr->defragm_buf = buf->next;
2604 kfree_skb(buf);
2606 buf = next;
2610 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2612 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2613 return;
2615 l_ptr->tolerance = tolerance;
2616 l_ptr->continuity_interval =
2617 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2618 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2619 }
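/*
 * Worked example of the derivation above (illustrative numbers only):
 * a tolerance of 1500 ms gives continuity_interval = 1500 / 4 = 375 ms
 * (below the 500 ms cap) and abort_limit = 1500 / (375 / 4) = 16, while
 * a tolerance of 3000 ms is capped at a 500 ms interval with
 * abort_limit = 3000 / (500 / 4) = 24.
 */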
2621 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2623 /* Data messages from this node, including FIRST_FRAGM */
2624 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2625 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2626 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2627 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2628 /* Transiting data messages, including FIRST_FRAGM */
2629 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2630 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2631 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2632 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2633 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2634 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2635 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2636 /* FRAGMENT and LAST_FRAGMENT packets */
2637 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2638 }
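/*
 * Example with a window of 50 packets (illustrative only): the limits for
 * locally originated data become 50 (low importance), 50 / 3 * 4 = 64
 * (medium), 50 / 3 * 5 = 80 (high) and 50 / 3 * 6 = 96 (critical), while
 * transit traffic and the internal users keep the fixed limits set above.
 */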
2641 * link_find_link - locate link by name
2642 * @name: ptr to link name string
2643 * @node: ptr to area to be filled with ptr to associated node
2645 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2646 * this also prevents link deletion.
2648 * Returns pointer to link (or NULL if invalid link name).
2650 static struct tipc_link *link_find_link(const char *name,
2651 struct tipc_node **node)
2653 struct tipc_link_name link_name_parts;
2654 struct tipc_bearer *b_ptr;
2655 struct tipc_link *l_ptr;
2657 if (!link_name_validate(name, &link_name_parts))
2658 return NULL;
2660 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2661 if (!b_ptr)
2662 return NULL;
2664 *node = tipc_node_find(link_name_parts.addr_peer);
2665 if (!*node)
2666 return NULL;
2668 l_ptr = (*node)->links[b_ptr->identity];
2669 if (!l_ptr || strcmp(l_ptr->name, name))
2670 return NULL;
2672 return l_ptr;
2676 * link_value_is_valid -- validate proposed link tolerance/priority/window
2678 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2679 * @new_value: the new value
2681 * Returns 1 if value is within range, 0 if not.
2683 static int link_value_is_valid(u16 cmd, u32 new_value)
2685 switch (cmd) {
2686 case TIPC_CMD_SET_LINK_TOL:
2687 return (new_value >= TIPC_MIN_LINK_TOL) &&
2688 (new_value <= TIPC_MAX_LINK_TOL);
2689 case TIPC_CMD_SET_LINK_PRI:
2690 return (new_value <= TIPC_MAX_LINK_PRI);
2691 case TIPC_CMD_SET_LINK_WINDOW:
2692 return (new_value >= TIPC_MIN_LINK_WIN) &&
2693 (new_value <= TIPC_MAX_LINK_WIN);
2695 return 0;
2699 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2700 * @name: ptr to link, bearer, or media name
2701 * @new_value: new value of link, bearer, or media setting
2702 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2704 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2706 * Returns 0 if value updated and negative value on error.
2708 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2710 struct tipc_node *node;
2711 struct tipc_link *l_ptr;
2712 struct tipc_bearer *b_ptr;
2713 struct tipc_media *m_ptr;
2715 l_ptr = link_find_link(name, &node);
2716 if (l_ptr) {
2717 /*
2718 * acquire node lock for tipc_link_send_proto_msg().
2719 * see "TIPC locking policy" in net.c.
2720 */
2721 tipc_node_lock(node);
2722 switch (cmd) {
2723 case TIPC_CMD_SET_LINK_TOL:
2724 link_set_supervision_props(l_ptr, new_value);
2725 tipc_link_send_proto_msg(l_ptr,
2726 STATE_MSG, 0, 0, new_value, 0, 0);
2727 break;
2728 case TIPC_CMD_SET_LINK_PRI:
2729 l_ptr->priority = new_value;
2730 tipc_link_send_proto_msg(l_ptr,
2731 STATE_MSG, 0, 0, 0, new_value, 0);
2732 break;
2733 case TIPC_CMD_SET_LINK_WINDOW:
2734 tipc_link_set_queue_limits(l_ptr, new_value);
2735 break;
2737 tipc_node_unlock(node);
2738 return 0;
2741 b_ptr = tipc_bearer_find(name);
2742 if (b_ptr) {
2743 switch (cmd) {
2744 case TIPC_CMD_SET_LINK_TOL:
2745 b_ptr->tolerance = new_value;
2746 return 0;
2747 case TIPC_CMD_SET_LINK_PRI:
2748 b_ptr->priority = new_value;
2749 return 0;
2750 case TIPC_CMD_SET_LINK_WINDOW:
2751 b_ptr->window = new_value;
2752 return 0;
2754 return -EINVAL;
2757 m_ptr = tipc_media_find(name);
2758 if (!m_ptr)
2759 return -ENODEV;
2760 switch (cmd) {
2761 case TIPC_CMD_SET_LINK_TOL:
2762 m_ptr->tolerance = new_value;
2763 return 0;
2764 case TIPC_CMD_SET_LINK_PRI:
2765 m_ptr->priority = new_value;
2766 return 0;
2767 case TIPC_CMD_SET_LINK_WINDOW:
2768 m_ptr->window = new_value;
2769 return 0;
2771 return -EINVAL;
2774 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2775 u16 cmd)
2777 struct tipc_link_config *args;
2778 u32 new_value;
2779 int res;
2781 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2782 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2784 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2785 new_value = ntohl(args->value);
2787 if (!link_value_is_valid(cmd, new_value))
2788 return tipc_cfg_reply_error_string(
2789 "cannot change, value invalid");
2791 if (!strcmp(args->name, tipc_bclink_name)) {
2792 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2793 (tipc_bclink_set_queue_limits(new_value) == 0))
2794 return tipc_cfg_reply_none();
2795 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2796 " (cannot change setting on broadcast link)");
2799 read_lock_bh(&tipc_net_lock);
2800 res = link_cmd_set_value(args->name, new_value, cmd);
2801 read_unlock_bh(&tipc_net_lock);
2802 if (res)
2803 return tipc_cfg_reply_error_string("cannot change link setting");
2805 return tipc_cfg_reply_none();
2809 * link_reset_statistics - reset link statistics
2810 * @l_ptr: pointer to link
2812 static void link_reset_statistics(struct tipc_link *l_ptr)
2814 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2815 l_ptr->stats.sent_info = l_ptr->next_out_no;
2816 l_ptr->stats.recv_info = l_ptr->next_in_no;
2819 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2821 char *link_name;
2822 struct tipc_link *l_ptr;
2823 struct tipc_node *node;
2825 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2826 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2828 link_name = (char *)TLV_DATA(req_tlv_area);
2829 if (!strcmp(link_name, tipc_bclink_name)) {
2830 if (tipc_bclink_reset_stats())
2831 return tipc_cfg_reply_error_string("link not found");
2832 return tipc_cfg_reply_none();
2835 read_lock_bh(&tipc_net_lock);
2836 l_ptr = link_find_link(link_name, &node);
2837 if (!l_ptr) {
2838 read_unlock_bh(&tipc_net_lock);
2839 return tipc_cfg_reply_error_string("link not found");
2842 tipc_node_lock(node);
2843 link_reset_statistics(l_ptr);
2844 tipc_node_unlock(node);
2845 read_unlock_bh(&tipc_net_lock);
2846 return tipc_cfg_reply_none();
2849 /**
2850 * percent - convert count to a percentage of total (rounded to the nearest whole percent)
2851 */
2852 static u32 percent(u32 count, u32 total)
2854 return (count * 100 + (total / 2)) / total;
2855 }
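/*
 * For example (illustrative only): percent(1, 3) = (100 + 1) / 3 = 33 and
 * percent(2, 3) = (200 + 1) / 3 = 67, i.e. the result is rounded to the
 * nearest whole percent.
 */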
2858 * tipc_link_stats - print link statistics
2859 * @name: link name
2860 * @buf: print buffer area
2861 * @buf_size: size of print buffer area
2863 * Returns length of print buffer data string (or 0 if error)
2865 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2867 struct tipc_link *l;
2868 struct tipc_stats *s;
2869 struct tipc_node *node;
2870 char *status;
2871 u32 profile_total = 0;
2872 int ret;
2874 if (!strcmp(name, tipc_bclink_name))
2875 return tipc_bclink_stats(buf, buf_size);
2877 read_lock_bh(&tipc_net_lock);
2878 l = link_find_link(name, &node);
2879 if (!l) {
2880 read_unlock_bh(&tipc_net_lock);
2881 return 0;
2883 tipc_node_lock(node);
2884 s = &l->stats;
2886 if (tipc_link_is_active(l))
2887 status = "ACTIVE";
2888 else if (tipc_link_is_up(l))
2889 status = "STANDBY";
2890 else
2891 status = "DEFUNCT";
2893 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
2894 " %s MTU:%u Priority:%u Tolerance:%u ms"
2895 " Window:%u packets\n",
2896 l->name, status, l->max_pkt, l->priority,
2897 l->tolerance, l->queue_limit[0]);
2899 ret += tipc_snprintf(buf + ret, buf_size - ret,
2900 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2901 l->next_in_no - s->recv_info, s->recv_fragments,
2902 s->recv_fragmented, s->recv_bundles,
2903 s->recv_bundled);
2905 ret += tipc_snprintf(buf + ret, buf_size - ret,
2906 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2907 l->next_out_no - s->sent_info, s->sent_fragments,
2908 s->sent_fragmented, s->sent_bundles,
2909 s->sent_bundled);
2911 profile_total = s->msg_length_counts;
2912 if (!profile_total)
2913 profile_total = 1;
2915 ret += tipc_snprintf(buf + ret, buf_size - ret,
2916 " TX profile sample:%u packets average:%u octets\n"
2917 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2918 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2919 s->msg_length_counts,
2920 s->msg_lengths_total / profile_total,
2921 percent(s->msg_length_profile[0], profile_total),
2922 percent(s->msg_length_profile[1], profile_total),
2923 percent(s->msg_length_profile[2], profile_total),
2924 percent(s->msg_length_profile[3], profile_total),
2925 percent(s->msg_length_profile[4], profile_total),
2926 percent(s->msg_length_profile[5], profile_total),
2927 percent(s->msg_length_profile[6], profile_total));
2929 ret += tipc_snprintf(buf + ret, buf_size - ret,
2930 " RX states:%u probes:%u naks:%u defs:%u"
2931 " dups:%u\n", s->recv_states, s->recv_probes,
2932 s->recv_nacks, s->deferred_recv, s->duplicates);
2934 ret += tipc_snprintf(buf + ret, buf_size - ret,
2935 " TX states:%u probes:%u naks:%u acks:%u"
2936 " dups:%u\n", s->sent_states, s->sent_probes,
2937 s->sent_nacks, s->sent_acks, s->retransmitted);
2939 ret += tipc_snprintf(buf + ret, buf_size - ret,
2940 " Congestion bearer:%u link:%u Send queue"
2941 " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
2942 s->max_queue_sz, s->queue_sz_counts ?
2943 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2945 tipc_node_unlock(node);
2946 read_unlock_bh(&tipc_net_lock);
2947 return ret;
2950 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2952 struct sk_buff *buf;
2953 struct tlv_desc *rep_tlv;
2954 int str_len;
2955 int pb_len;
2956 char *pb;
2958 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2959 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2961 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2962 if (!buf)
2963 return NULL;
2965 rep_tlv = (struct tlv_desc *)buf->data;
2966 pb = TLV_DATA(rep_tlv);
2967 pb_len = ULTRA_STRING_MAX_LEN;
2968 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2969 pb, pb_len);
2970 if (!str_len) {
2971 kfree_skb(buf);
2972 return tipc_cfg_reply_error_string("link not found");
2974 str_len += 1; /* for "\0" */
2975 skb_put(buf, TLV_SPACE(str_len));
2976 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2978 return buf;
2982 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2983 * @dest: network address of destination node
2984 * @selector: used to select from set of active links
2986 * If no active link can be found, uses default maximum packet size.
2988 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2990 struct tipc_node *n_ptr;
2991 struct tipc_link *l_ptr;
2992 u32 res = MAX_PKT_DEFAULT;
2994 if (dest == tipc_own_addr)
2995 return MAX_MSG_SIZE;
2997 read_lock_bh(&tipc_net_lock);
2998 n_ptr = tipc_node_find(dest);
2999 if (n_ptr) {
3000 tipc_node_lock(n_ptr);
3001 l_ptr = n_ptr->active_links[selector & 1];
3002 if (l_ptr)
3003 res = l_ptr->max_pkt;
3004 tipc_node_unlock(n_ptr);
3006 read_unlock_bh(&tipc_net_lock);
3007 return res;
3010 static void link_print(struct tipc_link *l_ptr, const char *str)
3012 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
3014 if (link_working_unknown(l_ptr))
3015 pr_cont(":WU\n");
3016 else if (link_reset_reset(l_ptr))
3017 pr_cont(":RR\n");
3018 else if (link_reset_unknown(l_ptr))
3019 pr_cont(":RU\n");
3020 else if (link_working_working(l_ptr))
3021 pr_cont(":WW\n");
3022 else
3023 pr_cont("\n");