/*
 * net/tipc/node.h: Include file for TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _TIPC_NODE_H
#define _TIPC_NODE_H

#include "node_subscr.h"
#include "addr.h"
#include "cluster.h"
#include "bearer.h"
/**
 * struct node - TIPC node structure
 * @addr: network address of node
 * @lock: spinlock governing access to structure
 * @owner: pointer to cluster that node belongs to
 * @next: pointer to next node in sorted list of cluster's nodes
 * @nsub: list of "node down" subscriptions monitoring node
 * @active_links: pointers to active links to node
 * @links: pointers to all links to node
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @permit_changeover: non-zero if node has redundant links to this system
 * @routers: bitmap (used for multicluster communication)
 * @last_router: (used for multicluster communication)
 * @bclink: broadcast-related info
 *    @supported: non-zero if node supports TIPC b'cast capability
 *    @acked: sequence # of last outbound b'cast message acknowledged by node
 *    @last_in: sequence # of last in-sequence b'cast message received from node
 *    @gap_after: sequence # of last message not requiring a NAK request
 *    @gap_to: sequence # of last message requiring a NAK request
 *    @nack_sync: counter that determines when NAK requests should be sent
 *    @deferred_head: oldest OOS b'cast message received from node
 *    @deferred_tail: newest OOS b'cast message received from node
 *    @defragm: list of partially reassembled b'cast message fragments from node
 */
struct node {
	u32 addr;
	spinlock_t lock;
	struct cluster *owner;
	struct node *next;
	struct list_head nsub;
	struct link *active_links[2];
	struct link *links[MAX_BEARERS];
	int link_cnt;
	int working_links;
	int permit_changeover;
	u32 routers[512/32];
	int last_router;
	struct {
		int supported;
		u32 acked;
		u32 last_in;
		u32 gap_after;
		u32 gap_to;
		u32 nack_sync;
		struct sk_buff *deferred_head;
		struct sk_buff *deferred_tail;
		struct sk_buff *defragm;
	} bclink;
};
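
/*
 * Illustrative sketch, not part of the original header: the deferred
 * out-of-sequence broadcast messages are assumed here to form a singly
 * linked sk_buff chain anchored at bclink.deferred_head and
 * bclink.deferred_tail. The helper name is hypothetical, and a real
 * caller would need to hold n_ptr->lock while walking the chain.
 */
static inline u32 tipc_node_bclink_deferred_cnt(struct node *n_ptr)
{
	struct sk_buff *buf = n_ptr->bclink.deferred_head;
	u32 cnt = 0;

	while (buf) {
		cnt++;
		buf = buf->next;
	}
	return cnt;
}
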
extern struct node *tipc_nodes;
extern u32 tipc_own_tag;

struct node *tipc_node_create(u32 addr);
void tipc_node_delete(struct node *n_ptr);
struct node *tipc_node_attach_link(struct link *l_ptr);
void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr);
void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr);
void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr);
int tipc_node_has_active_links(struct node *n_ptr);
int tipc_node_has_redundant_links(struct node *n_ptr);
u32 tipc_node_select_router(struct node *n_ptr, u32 ref);
struct node *tipc_node_select_next_hop(u32 addr, u32 selector);
int tipc_node_is_up(struct node *n_ptr);
void tipc_node_add_router(struct node *n_ptr, u32 router);
void tipc_node_remove_router(struct node *n_ptr, u32 router);
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
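
/*
 * Illustrative sketch, not part of the original interface: node objects
 * are chained into the singly linked list headed by tipc_nodes via
 * node->next, so they can be scanned as shown below. The helper name is
 * hypothetical, and a real caller would have to hold the appropriate
 * TIPC network lock (not shown here) while traversing the list.
 */
static inline u32 tipc_node_count_up(void)
{
	struct node *n_ptr;
	u32 cnt = 0;

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next)
		if (tipc_node_is_up(n_ptr))
			cnt++;
	return cnt;
}
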
static inline struct node *tipc_node_find(u32 addr)
{
	if (likely(in_own_cluster(addr)))
		return tipc_local_nodes[tipc_node(addr)];
	else if (tipc_addr_domain_valid(addr)) {
		struct cluster *c_ptr = tipc_cltr_find(addr);

		if (c_ptr)
			return c_ptr->nodes[tipc_node(addr)];
	}
	return NULL;
}

static inline struct node *tipc_node_select(u32 addr, u32 selector)
{
	if (likely(in_own_cluster(addr)))
		return tipc_local_nodes[tipc_node(addr)];
	return tipc_node_select_next_hop(addr, selector);
}

static inline void tipc_node_lock(struct node *n_ptr)
{
	spin_lock_bh(&n_ptr->lock);
}

static inline void tipc_node_unlock(struct node *n_ptr)
{
	spin_unlock_bh(&n_ptr->lock);
}
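
/*
 * Illustrative sketch, not part of the original interface: the usual
 * access pattern is to look a node up by address, take its per-node
 * lock with the BH-disabling helpers above, read or modify its fields,
 * and release the lock. The helper name is hypothetical.
 */
static inline int tipc_node_link_count(u32 addr)
{
	struct node *n_ptr = tipc_node_find(addr);
	int cnt = 0;

	if (n_ptr) {
		tipc_node_lock(n_ptr);
		cnt = n_ptr->link_cnt;
		tipc_node_unlock(n_ptr);
	}
	return cnt;
}
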
#endif