/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
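
/* All peer records live on a single list protected by rxrpc_peer_lock.
 * rxrpc_peer_wq is woken when the list drains so that module unload can wait
 * for the last record to be destroyed.
 */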

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		peer->mtu = peer->if_mtu = 65535;

		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

		peer->hdrsize += sizeof(struct rxrpc_header);
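		/* maxdata is the largest payload that will fit in a single
		 * packet below the peer's MTU once the IP, UDP and RxRPC
		 * headers have been allowed for */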
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%u.%u.%u.%u+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       NIPQUAD(srx->transport.sin.sin_addr),
	       ntohs(srx->transport.sin.sin_port));

	/* search the peer list first */
	read_lock_bh(&rxrpc_peer_lock);
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_peer_lock);
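
	/* the lock was dropped while the candidate was being allocated, so
	 * another thread may have added a matching peer in the meantime;
	 * repeat the search before committing the candidate */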
	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;

	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     NIPQUAD(peer->srx.transport.sin.sin_addr),
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");
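
	/* note that this never creates a new peer record; an unknown sender
	 * simply gets -EBUSY handed back to the caller */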

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

new_UDP_peer:
	/* the search ran off the end of the list, so "peer" no longer points
	 * at a valid record here and must not be dereferenced */
	_net("Rx UDP DGRAM from NEW peer");
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}
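
	/* that was the last reference; defer unlinking and freeing the record
	 * to the work item */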
	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);

	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);
	_leave("");
}

/*
 * preemptively destroy all the peer records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);
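
		/* each wakeup puts the task back in TASK_RUNNING, so the
		 * state has to be reset after every schedule() before the
		 * list is rechecked */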
		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}