/*
 * net/tipc/discover.c
 *
 * Copyright (c) 2003-2006, Ericsson AB
 * Copyright (c) 2005-2006, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "dbg.h"
#include "link.h"
#include "zone.h"
#include "discover.h"
#include "port.h"
#include "name_table.h"

#define TIPC_LINK_REQ_INIT      125     /* min delay during bearer start up */
#define TIPC_LINK_REQ_FAST      2000    /* normal delay if bearer has no links */
#define TIPC_LINK_REQ_SLOW      600000  /* normal delay if bearer has links */
#if 0
#define GET_NODE_INFO         300
#define GET_NODE_INFO_RESULT  301
#define FORWARD_LINK_PROBE    302
#define LINK_REQUEST_REJECTED 303
#define LINK_REQUEST_ACCEPTED 304
#define DROP_LINK_REQUEST     305
#define CHECK_LINK_COUNT      306
#endif
/*
 * TODO: Most of the inter-cluster setup stuff should be
 * rewritten, and be made conformant with specification.
 */
/**
 * struct link_req - information about an ongoing link setup request
 * @bearer: bearer issuing requests
 * @dest: destination address for request messages
 * @buf: request message to be (repeatedly) sent
 * @timer: timer governing period between requests
 * @timer_intv: current interval between requests (in ms)
 */
struct link_req {
        struct bearer *bearer;
        struct tipc_media_addr dest;
        struct sk_buff *buf;
        struct timer_list timer;
        unsigned int timer_intv;
};
#if 0
int disc_create_link(const struct tipc_link_create *argv)
{
        /*
         * Code for inter cluster link setup here
         */
        return TIPC_OK;
}
#endif
/*
 * disc_lost_link(): A link has lost contact
 */

void tipc_disc_link_event(u32 addr, char *name, int up)
{
        if (in_own_cluster(addr))
                return;
        /*
         * Code for inter cluster link setup here
         */
}
/**
 * tipc_disc_init_msg - initialize a link setup message
 * @type: message type (request or response)
 * @req_links: number of links associated with message
 * @dest_domain: network domain of node(s) which should respond to message
 * @b_ptr: ptr to bearer issuing message
 */

static struct sk_buff *tipc_disc_init_msg(u32 type,
                                          u32 req_links,
                                          u32 dest_domain,
                                          struct bearer *b_ptr)
{
        struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
        struct tipc_msg *msg;

        if (buf) {
                msg = buf_msg(buf);
                msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
                msg_set_non_seq(msg, 1);
                msg_set_req_links(msg, req_links);
                msg_set_dest_domain(msg, dest_domain);
                msg_set_bc_netid(msg, tipc_net_id);
                msg_set_media_addr(msg, &b_ptr->publ.addr);
        }
        return buf;
}
/**
 * disc_dupl_alert - issue node address duplication alert
 * @b_ptr: pointer to bearer detecting duplication
 * @node_addr: duplicated node address
 * @media_addr: media address advertised by duplicated node
 */

static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
                            struct tipc_media_addr *media_addr)
{
        char node_addr_str[16];
        char media_addr_str[64];
        struct print_buf pb;

        addr_string_fill(node_addr_str, node_addr);
        tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
        tipc_media_addr_printf(&pb, media_addr);
        tipc_printbuf_validate(&pb);
        warn("Duplicate %s using %s seen on <%s>\n",
             node_addr_str, media_addr_str, b_ptr->publ.name);
}
/**
 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
 * @buf: buffer containing message
 * @b_ptr: bearer that message arrived on
 */

void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
{
        struct link *link;
        struct tipc_media_addr media_addr;
        struct tipc_msg *msg = buf_msg(buf);
        u32 dest = msg_dest_domain(msg);
        u32 orig = msg_prevnode(msg);
        u32 net_id = msg_bc_netid(msg);
        u32 type = msg_type(msg);

        msg_get_media_addr(msg, &media_addr);
        msg_dbg(msg, "RECV:");
        buf_discard(buf);
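        /*
         * Validate the message: discard anything from a different TIPC
         * network, with invalid addressing, outside our scope, or from a
         * disallowed slave-to-slave contact; a message claiming our own
         * node address from a different interface triggers a duplicate
         * address alert.
         */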
        if (net_id != tipc_net_id)
                return;
        if (!tipc_addr_domain_valid(dest))
                return;
        if (!tipc_addr_node_valid(orig))
                return;
        if (orig == tipc_own_addr) {
                if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
                        disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
                return;
        }
        if (!in_scope(dest, tipc_own_addr))
                return;
        if (is_slave(tipc_own_addr) && is_slave(orig))
                return;
        if (is_slave(orig) && !in_own_cluster(orig))
                return;
        if (in_own_cluster(orig)) {
                /* Always accept link here */
                struct sk_buff *rbuf;
                struct tipc_media_addr *addr;
                struct tipc_node *n_ptr = tipc_node_find(orig);
                int link_fully_up;

                dbg(" in own cluster\n");
                if (n_ptr == NULL) {
                        n_ptr = tipc_node_create(orig);
                        if (!n_ptr)
                                return;
                }
                spin_lock_bh(&n_ptr->lock);
                link = n_ptr->links[b_ptr->identity];
                if (!link) {
                        dbg("creating link\n");
                        link = tipc_link_create(b_ptr, orig, &media_addr);
                        if (!link) {
                                spin_unlock_bh(&n_ptr->lock);
                                return;
                        }
                }
                addr = &link->media_addr;
                if (memcmp(addr, &media_addr, sizeof(*addr))) {
                        if (tipc_link_is_up(link) || (!link->started)) {
                                disc_dupl_alert(b_ptr, orig, &media_addr);
                                spin_unlock_bh(&n_ptr->lock);
                                return;
                        }
                        warn("Resetting link <%s>, peer interface address changed\n",
                             link->name);
                        memcpy(addr, &media_addr, sizeof(*addr));
                        tipc_link_reset(link);
                }
                link_fully_up = (link->state == WORKING_WORKING);
                spin_unlock_bh(&n_ptr->lock);
                if ((type == DSC_RESP_MSG) || link_fully_up)
                        return;
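                /*
                 * Reply with a unicast discovery response so the requesting
                 * node can set up the link from its end.
                 */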
                rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
                if (rbuf != NULL) {
                        msg_dbg(buf_msg(rbuf), "SEND:");
                        b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
                        buf_discard(rbuf);
                }
        }
}
/**
 * tipc_disc_stop_link_req - stop sending periodic link setup requests
 * @req: ptr to link request structure
 */

void tipc_disc_stop_link_req(struct link_req *req)
{
        if (!req)
                return;

        k_cancel_timer(&req->timer);
        k_term_timer(&req->timer);
        buf_discard(req->buf);
        kfree(req);
}
/**
 * tipc_disc_update_link_req - update frequency of periodic link setup requests
 * @req: ptr to link request structure
 */

void tipc_disc_update_link_req(struct link_req *req)
{
        if (!req)
                return;
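        /*
         * Probe quickly while the bearer has no neighboring nodes, and slow
         * down once at least one node has been discovered.
         */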
        if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
                if (!req->bearer->nodes.count) {
                        req->timer_intv = TIPC_LINK_REQ_FAST;
                        k_start_timer(&req->timer, req->timer_intv);
                }
        } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
                if (req->bearer->nodes.count) {
                        req->timer_intv = TIPC_LINK_REQ_SLOW;
                        k_start_timer(&req->timer, req->timer_intv);
                }
        } else {
                /* leave timer "as is" if haven't yet reached a "normal" rate */
        }
}
/**
 * disc_timeout - send a periodic link setup request
 * @req: ptr to link request structure
 *
 * Called whenever a link setup request timer associated with a bearer expires.
 */

static void disc_timeout(struct link_req *req)
{
        spin_lock_bh(&req->bearer->publ.lock);

        req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);

        if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
            (req->timer_intv == TIPC_LINK_REQ_FAST)) {
                /* leave timer interval "as is" if already at a "normal" rate */
        } else {
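                /*
                 * Still ramping up from the start-up interval: double it,
                 * capping at the fast rate, and jump straight to the slow
                 * rate if the bearer already has nodes.
                 */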
                req->timer_intv *= 2;
                if (req->timer_intv > TIPC_LINK_REQ_FAST)
                        req->timer_intv = TIPC_LINK_REQ_FAST;
                if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
                    (req->bearer->nodes.count))
                        req->timer_intv = TIPC_LINK_REQ_SLOW;
        }
        k_start_timer(&req->timer, req->timer_intv);

        spin_unlock_bh(&req->bearer->publ.lock);
}
/**
 * tipc_disc_init_link_req - start sending periodic link setup requests
 * @b_ptr: ptr to bearer issuing requests
 * @dest: destination address for request messages
 * @dest_domain: network domain of node(s) which should respond to message
 * @req_links: max number of desired links
 *
 * Returns pointer to link request structure, or NULL if unable to create.
 */

struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
                                         const struct tipc_media_addr *dest,
                                         u32 dest_domain,
                                         u32 req_links)
{
        struct link_req *req;

        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
                return NULL;

        req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
        if (!req->buf) {
                kfree(req);
                return NULL;
        }

        memcpy(&req->dest, dest, sizeof(*dest));
        req->bearer = b_ptr;
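        /*
         * Begin probing at the short start-up interval; disc_timeout()
         * lengthens it toward the normal rates on each expiry.
         */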
        req->timer_intv = TIPC_LINK_REQ_INIT;
        k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
        k_start_timer(&req->timer, req->timer_intv);
        return req;
}