/*
 * net/tipc/cluster.c: TIPC cluster management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "cluster.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "node.h"
#include "net.h"
#include "msg.h"
#include "bearer.h"

static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
                                u32 lower, u32 upper);
static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);

struct tipc_node **tipc_local_nodes = NULL;
struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
u32 tipc_highest_allowed_slave = 0;

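/**
 * tipc_cltr_create - create and attach a new cluster object
 *
 * Allocates the cluster structure and its node lookup table, then links
 * the cluster into its zone (creating the zone first if necessary).
 */
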
struct cluster *tipc_cltr_create(u32 addr)
{
        struct _zone *z_ptr;
        struct cluster *c_ptr;
        int max_nodes;

        c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
        if (c_ptr == NULL) {
                warn("Cluster creation failure, no memory\n");
                return NULL;
        }

        c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
        if (in_own_cluster(addr))
                max_nodes = LOWEST_SLAVE + tipc_max_slaves;
        else
                max_nodes = tipc_max_nodes + 1;

        c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void *), GFP_ATOMIC);
        if (c_ptr->nodes == NULL) {
                warn("Cluster creation failure, no memory for node area\n");
                kfree(c_ptr);
                return NULL;
        }

        if (in_own_cluster(addr))
                tipc_local_nodes = c_ptr->nodes;
        c_ptr->highest_slave = LOWEST_SLAVE - 1;
        c_ptr->highest_node = 0;

        z_ptr = tipc_zone_find(tipc_zone(addr));
        if (!z_ptr) {
                z_ptr = tipc_zone_create(addr);
        }
        if (!z_ptr) {
                kfree(c_ptr->nodes);
                kfree(c_ptr);
                return NULL;
        }

        tipc_zone_attach_cluster(z_ptr, c_ptr);
        c_ptr->owner = z_ptr;
        return c_ptr;
}

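/**
 * tipc_cltr_delete - delete a cluster and all of its nodes
 */
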
void tipc_cltr_delete(struct cluster *c_ptr)
{
        u32 n_num;

        if (!c_ptr)
                return;
        for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
                tipc_node_delete(c_ptr->nodes[n_num]);
        }
        for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
                tipc_node_delete(c_ptr->nodes[n_num]);
        }
        kfree(c_ptr->nodes);
        kfree(c_ptr);
}

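/**
 * tipc_cltr_next_node - find next node in cluster that has active links
 *
 * Searches upward from the node following 'addr', wrapping around to the
 * start of the cluster; returns 0 if no suitable node is found.
 */
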
u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
{
        struct tipc_node *n_ptr;
        u32 n_num = tipc_node(addr) + 1;

        if (!c_ptr)
                return addr;
        for (; n_num <= c_ptr->highest_node; n_num++) {
                n_ptr = c_ptr->nodes[n_num];
                if (n_ptr && tipc_node_has_active_links(n_ptr))
                        return n_ptr->addr;
        }
        for (n_num = 1; n_num < tipc_node(addr); n_num++) {
                n_ptr = c_ptr->nodes[n_num];
                if (n_ptr && tipc_node_has_active_links(n_ptr))
                        return n_ptr->addr;
        }
        return 0;
}

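/**
 * tipc_cltr_attach_node - add a newly created node to its cluster's node table
 */
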
void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr)
{
        u32 n_num = tipc_node(n_ptr->addr);
        u32 max_n_num = tipc_max_nodes;

        if (in_own_cluster(n_ptr->addr))
                max_n_num = tipc_highest_allowed_slave;
        assert(n_num > 0);
        assert(n_num <= max_n_num);
        assert(c_ptr->nodes[n_num] == NULL);
        c_ptr->nodes[n_num] = n_ptr;
        if (n_num > c_ptr->highest_node)
                c_ptr->highest_node = n_num;
}

/**
 * tipc_cltr_select_router - select router to a cluster
 *
 * Uses deterministic and fair algorithm.
 */

u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
{
        u32 n_num;
        u32 ulim = c_ptr->highest_node;
        u32 mask;
        u32 tstart;

        assert(!in_own_cluster(c_ptr->addr));
        if (!ulim)
                return 0;

        /* Start entry must be random */
        mask = tipc_max_nodes;
        while (mask > ulim)
                mask >>= 1;
        tstart = ref & mask;
        n_num = tstart;

        /* Lookup upwards with wrap-around */
        do {
                if (tipc_node_is_up(c_ptr->nodes[n_num]))
                        break;
        } while (++n_num <= ulim);
        if (n_num > ulim) {
                n_num = 1;
                do {
                        if (tipc_node_is_up(c_ptr->nodes[n_num]))
                                break;
                } while (++n_num < tstart);
                if (n_num == tstart)
                        return 0;
        }
        assert(n_num <= ulim);
        return tipc_node_select_router(c_ptr->nodes[n_num], ref);
}

/**
 * tipc_cltr_select_node - select destination node within a remote cluster
 *
 * Uses deterministic and fair algorithm.
 */

struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
{
        u32 n_num;
        u32 mask = tipc_max_nodes;
        u32 start_entry;

        assert(!in_own_cluster(c_ptr->addr));
        if (!c_ptr->highest_node)
                return NULL;

        /* Start entry must be random */
        while (mask > c_ptr->highest_node) {
                mask >>= 1;
        }
        start_entry = (selector & mask) ? selector & mask : 1u;
        assert(start_entry <= c_ptr->highest_node);

        /* Lookup upwards with wrap-around */
        for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
                if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
                        return c_ptr->nodes[n_num];
        }
        for (n_num = 1; n_num < start_entry; n_num++) {
                if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
                        return c_ptr->nodes[n_num];
        }
        return NULL;
}

/*
 * Routing table management: See description in node.c
 */

static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
{
        u32 size = INT_H_SIZE + data_size;
        struct sk_buff *buf = buf_acquire(size);
        struct tipc_msg *msg;

        if (buf) {
                msg = buf_msg(buf);
                memset((char *)msg, 0, size);
                msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
        }
        return buf;
}

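/**
 * tipc_cltr_bcast_new_route - tell nodes in range about newly found route to 'dest'
 */
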
void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
                               u32 lower, u32 upper)
{
        struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
        struct tipc_msg *msg;

        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, dest);
                msg_set_type(msg, ROUTE_ADDITION);
                tipc_cltr_multicast(c_ptr, buf, lower, upper);
        } else {
                warn("Memory squeeze: broadcast of new route failed\n");
        }
}

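/**
 * tipc_cltr_bcast_lost_route - tell nodes in range that route to 'dest' is lost
 */
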
void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
                                u32 lower, u32 upper)
{
        struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
        struct tipc_msg *msg;

        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, dest);
                msg_set_type(msg, ROUTE_REMOVAL);
                tipc_cltr_multicast(c_ptr, buf, lower, upper);
        } else {
                warn("Memory squeeze: broadcast of lost route failed\n");
        }
}

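/**
 * tipc_cltr_send_slave_routes - send own cluster's slave node routes to 'dest'
 */
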
void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;
        u32 highest = c_ptr->highest_slave;
        u32 n_num;
        int send = 0;

        assert(!is_slave(dest));
        assert(in_own_cluster(dest));
        assert(in_own_cluster(c_ptr->addr));
        if (highest <= LOWEST_SLAVE)
                return;
        buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
                                            c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, SLAVE_ROUTING_TABLE);
                for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] &&
                            tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
                        tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
                warn("Memory squeeze: broadcast of slave routes failed\n");
        }
}

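/**
 * tipc_cltr_send_ext_routes - send routing table of remote cluster to 'dest'
 */
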
void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;
        u32 highest = c_ptr->highest_node;
        u32 n_num;
        int send = 0;

        if (in_own_cluster(c_ptr->addr))
                return;
        assert(!is_slave(dest));
        assert(in_own_cluster(dest));
        highest = c_ptr->highest_node;
        buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, EXT_ROUTING_TABLE);
                for (n_num = 1; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] &&
                            tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
                        tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
                warn("Memory squeeze: broadcast of external route failed\n");
        }
}

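/**
 * tipc_cltr_send_local_routes - send own cluster's node routes to slave 'dest'
 */
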
void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;
        u32 highest = c_ptr->highest_node;
        u32 n_num;
        int send = 0;

        assert(is_slave(dest));
        assert(in_own_cluster(c_ptr->addr));
        buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, LOCAL_ROUTING_TABLE);
                for (n_num = 1; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] &&
                            tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
                        tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
                warn("Memory squeeze: broadcast of local route failed\n");
        }
}

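/**
 * tipc_cltr_recv_routing_table - process incoming routing table message
 *
 * Handles complete routing tables as well as individual route additions and
 * removals, creating cluster and node objects as needed.
 */
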
void tipc_cltr_recv_routing_table(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct cluster *c_ptr;
        struct tipc_node *n_ptr;
        unchar *node_table;
        u32 table_size;
        u32 router;
        u32 rem_node = msg_remote_node(msg);
        u32 z_num;
        u32 c_num;
        u32 n_num;

        c_ptr = tipc_cltr_find(rem_node);
        if (!c_ptr) {
                c_ptr = tipc_cltr_create(rem_node);
                if (!c_ptr) {
                        buf_discard(buf);
                        return;
                }
        }

        node_table = buf->data + msg_hdr_sz(msg);
        table_size = msg_size(msg) - msg_hdr_sz(msg);
        router = msg_prevnode(msg);
        z_num = tipc_zone(rem_node);
        c_num = tipc_cluster(rem_node);

        switch (msg_type(msg)) {
        case LOCAL_ROUTING_TABLE:
                assert(is_slave(tipc_own_addr));
                /* fall through: slave processes local table as external one */
        case EXT_ROUTING_TABLE:
                for (n_num = 1; n_num < table_size; n_num++) {
                        if (node_table[n_num]) {
                                u32 addr = tipc_addr(z_num, c_num, n_num);
                                n_ptr = c_ptr->nodes[n_num];
                                if (!n_ptr) {
                                        n_ptr = tipc_node_create(addr);
                                }
                                if (n_ptr)
                                        tipc_node_add_router(n_ptr, router);
                        }
                }
                break;
        case SLAVE_ROUTING_TABLE:
                assert(!is_slave(tipc_own_addr));
                assert(in_own_cluster(c_ptr->addr));
                for (n_num = 1; n_num < table_size; n_num++) {
                        if (node_table[n_num]) {
                                u32 slave_num = n_num + LOWEST_SLAVE;
                                u32 addr = tipc_addr(z_num, c_num, slave_num);
                                n_ptr = c_ptr->nodes[slave_num];
                                if (!n_ptr) {
                                        n_ptr = tipc_node_create(addr);
                                }
                                if (n_ptr)
                                        tipc_node_add_router(n_ptr, router);
                        }
                }
                break;
        case ROUTE_ADDITION:
                if (!is_slave(tipc_own_addr)) {
                        assert(!in_own_cluster(c_ptr->addr)
                               || is_slave(rem_node));
                } else {
                        assert(in_own_cluster(c_ptr->addr)
                               && !is_slave(rem_node));
                }
                n_ptr = c_ptr->nodes[tipc_node(rem_node)];
                if (!n_ptr)
                        n_ptr = tipc_node_create(rem_node);
                if (n_ptr)
                        tipc_node_add_router(n_ptr, router);
                break;
        case ROUTE_REMOVAL:
                if (!is_slave(tipc_own_addr)) {
                        assert(!in_own_cluster(c_ptr->addr)
                               || is_slave(rem_node));
                } else {
                        assert(in_own_cluster(c_ptr->addr)
                               && !is_slave(rem_node));
                }
                n_ptr = c_ptr->nodes[tipc_node(rem_node)];
                if (n_ptr)
                        tipc_node_remove_router(n_ptr, router);
                break;
        default:
                assert(!"Illegal routing manager message received\n");
        }
        buf_discard(buf);
}

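/**
 * tipc_cltr_remove_as_router - remove 'router' as router for all cluster nodes
 */
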
void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
{
        u32 start_entry;
        u32 tstop;
        u32 n_num;

        if (is_slave(router))
                return; /* Slave nodes can not be routers */

        if (in_own_cluster(c_ptr->addr)) {
                start_entry = LOWEST_SLAVE;
                tstop = c_ptr->highest_slave;
        } else {
                start_entry = 1;
                tstop = c_ptr->highest_node;
        }

        for (n_num = start_entry; n_num <= tstop; n_num++) {
                if (c_ptr->nodes[n_num]) {
                        tipc_node_remove_router(c_ptr->nodes[n_num], router);
                }
        }
}

/**
 * tipc_cltr_multicast - multicast message to local nodes
 */

static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
                                u32 lower, u32 upper)
{
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
        u32 n_num;
        u32 tstop;

        assert(lower <= upper);
        assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
               ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
        assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
               ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
        assert(in_own_cluster(c_ptr->addr));

        tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
        if (tstop > upper)
                tstop = upper;
        for (n_num = lower; n_num <= tstop; n_num++) {
                n_ptr = c_ptr->nodes[n_num];
                if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (buf_copy == NULL)
                                break;
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
                        tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
                }
        }
        buf_discard(buf);
}

/**
 * tipc_cltr_broadcast - broadcast message to all nodes within cluster
 */

void tipc_cltr_broadcast(struct sk_buff *buf)
{
        struct sk_buff *buf_copy;
        struct cluster *c_ptr;
        struct tipc_node *n_ptr;
        u32 n_num;
        u32 tstart;
        u32 tstop;
        u32 node_type;

        if (tipc_mode == TIPC_NET_MODE) {
                c_ptr = tipc_cltr_find(tipc_own_addr);
                assert(in_own_cluster(c_ptr->addr)); /* For now */

                /* Send to standard nodes, then repeat loop sending to slaves */
                tstart = 1;
                tstop = c_ptr->highest_node;
                for (node_type = 1; node_type <= 2; node_type++) {
                        for (n_num = tstart; n_num <= tstop; n_num++) {
                                n_ptr = c_ptr->nodes[n_num];
                                if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                                        if (buf_copy == NULL)
                                                goto exit;
                                        msg_set_destnode(buf_msg(buf_copy),
                                                         n_ptr->addr);
                                        tipc_link_send(buf_copy, n_ptr->addr,
                                                       n_ptr->addr);
                                }
                        }
                        tstart = LOWEST_SLAVE;
                        tstop = c_ptr->highest_slave;
                }
        }
exit:
        buf_discard(buf);
}

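/**
 * tipc_cltr_init - create own cluster object at network start-up
 */
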
int tipc_cltr_init(void)
{
        tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
        return tipc_cltr_create(tipc_own_addr) ? 0 : -ENOMEM;
}