/* peer.c: Rx RPC peer management
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include "internal.h"
__RXACCT_DECL(atomic_t rxrpc_peer_count);
LIST_HEAD(rxrpc_peers);
DECLARE_RWSEM(rxrpc_peers_sem);
unsigned long rxrpc_peer_timeout = 12 * 60 * 60;
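/* Note: rxrpc_peer_timeout is expressed in seconds (12 * 60 * 60 = twelve
 * hours); rxrpc_put_peer() multiplies it by HZ to get jiffies when it arms
 * the graveyard timer.
 */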
static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);

static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_peer *peer =
		list_entry(timer, struct rxrpc_peer, timeout);

	_debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));

	rxrpc_peer_do_timeout(peer);
}

static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
	.timed_out	= __rxrpc_peer_timeout,
};
/*****************************************************************************/
/*
 * create a peer record
 */
static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
			       struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer;

	_enter("%p,%08x", trans, ntohl(addr));

	/* allocate and initialise a peer record */
	peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
	if (!peer) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(peer, 0, sizeof(struct rxrpc_peer));
	atomic_set(&peer->usage, 1);

	INIT_LIST_HEAD(&peer->link);
	INIT_LIST_HEAD(&peer->proc_link);
	INIT_LIST_HEAD(&peer->conn_idlist);
	INIT_LIST_HEAD(&peer->conn_active);
	INIT_LIST_HEAD(&peer->conn_graveyard);
	spin_lock_init(&peer->conn_gylock);
	init_waitqueue_head(&peer->conn_gy_waitq);
	rwlock_init(&peer->conn_idlock);
	rwlock_init(&peer->conn_lock);
	atomic_set(&peer->conn_count, 0);
	spin_lock_init(&peer->lock);
	rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);

	peer->addr.s_addr = addr;

	peer->trans = trans;
	peer->ops = trans->peer_ops;

	__RXACCT(atomic_inc(&rxrpc_peer_count));
	*_peer = peer;
	_leave(" = 0 (%p)", peer);

	return 0;
} /* end __rxrpc_create_peer() */
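/* Note that __rxrpc_create_peer() only allocates and initialises the record;
 * it is rxrpc_peer_lookup() below that links it onto the transport's active
 * list and the global rxrpc_peers list, and that takes the transport
 * reference.
 */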
/*****************************************************************************/
/*
 * find a peer record on the specified transport
 * - returns (if successful) with peer record usage incremented
 * - resurrects it from the graveyard if found there
 */
int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr,
		      struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer, *candidate = NULL;
	struct list_head *_p;
	int ret;

	_enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));

	/* [common case] search the transport's active list first */
	read_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active;
	}
	read_unlock(&trans->peer_lock);

	/* [uncommon case] not active - create a candidate for a new record */
	ret = __rxrpc_create_peer(trans, addr, &candidate);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&trans->peer_gylock);
	list_for_each(_p, &trans->peer_graveyard) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_in_graveyard;
	}
	spin_unlock(&trans->peer_gylock);

	/* we can now add the new candidate to the list
	 * - tell the application layer that this peer has been added
	 */
	rxrpc_get_transport(trans);
	peer = candidate;
	candidate = NULL;

	if (peer->ops && peer->ops->adding) {
		ret = peer->ops->adding(peer);
		if (ret < 0) {
			write_unlock(&trans->peer_lock);
			__RXACCT(atomic_dec(&rxrpc_peer_count));
			kfree(peer);
			rxrpc_put_transport(trans);
			_leave(" = %d", ret);
			return ret;
		}
	}

	atomic_inc(&trans->peer_count);

 make_active:
	list_add_tail(&peer->link, &trans->peer_active);

 success_uwfree:
	write_unlock(&trans->peer_lock);

	if (candidate) {
		__RXACCT(atomic_dec(&rxrpc_peer_count));
		kfree(candidate);
	}

	if (list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_add_tail(&peer->proc_link, &rxrpc_peers);
		up_write(&rxrpc_peers_sem);
	}

 success:
	*_peer = peer;

	_leave(" = 0 (%p{u=%d cc=%d})",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count));
	return 0;

	/* handle the peer being found in the active list straight off */
 found_active:
	rxrpc_get_peer(peer);
	read_unlock(&trans->peer_lock);
	goto success;

	/* handle resurrecting a peer from the graveyard */
 found_in_graveyard:
	rxrpc_get_peer(peer);
	rxrpc_get_transport(peer->trans);
	rxrpc_krxtimod_del_timer(&peer->timeout);
	list_del_init(&peer->link);
	spin_unlock(&trans->peer_gylock);
	goto make_active;

	/* handle finding the peer on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_peer(peer);
	goto success_uwfree;

} /* end rxrpc_peer_lookup() */
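/* A minimal usage sketch (hypothetical caller; the real callers live in the
 * connection-handling code, not in this file):
 *
 *	struct rxrpc_peer *peer;
 *	int ret;
 *
 *	ret = rxrpc_peer_lookup(trans, htonl(INADDR_LOOPBACK), &peer);
 *	if (ret < 0)
 *		return ret;	// -ENOMEM or an ops->adding() error
 *
 *	// ... use the peer record (RTT cache, connection lists, ...) ...
 *
 *	rxrpc_put_peer(peer);	// drop the reference taken by the lookup
 */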
/*****************************************************************************/
/*
 * finish with a peer record
 * - it gets sent to the graveyard from where it can be resurrected or timed
 *   out
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	/* sanity check */
	if (atomic_read(&peer->usage) <= 0)
		BUG();

	write_lock(&trans->peer_lock);
	spin_lock(&trans->peer_gylock);
	if (likely(!atomic_dec_and_test(&peer->usage))) {
		spin_unlock(&trans->peer_gylock);
		write_unlock(&trans->peer_lock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	list_del(&peer->link);
	write_unlock(&trans->peer_lock);

	list_add_tail(&peer->link, &trans->peer_graveyard);

	BUG_ON(!list_empty(&peer->conn_active));

	rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);

	spin_unlock(&trans->peer_gylock);

	rxrpc_put_transport(trans);

	_leave(" [killed]");
} /* end rxrpc_put_peer() */
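/* Lifecycle summary: when the last reference is dropped, rxrpc_put_peer()
 * parks the record on the transport's graveyard list and arms a timer for
 * rxrpc_peer_timeout seconds.  If rxrpc_peer_lookup() sees the same address
 * again before the timer fires, the record is resurrected; otherwise
 * rxrpc_peer_do_timeout() below tears it down and frees it.
 */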
/*****************************************************************************/
/*
 * handle a peer timing out in the graveyard
 * - called from krxtimod
 */
static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{u=%d cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	BUG_ON(atomic_read(&peer->usage) < 0);

	/* remove from graveyard if still dead */
	spin_lock(&trans->peer_gylock);
	if (atomic_read(&peer->usage) == 0)
		list_del_init(&peer->link);
	else
		peer = NULL;
	spin_unlock(&trans->peer_gylock);

	if (!peer) {
		_leave("");
		return; /* resurrected */
	}

	/* clear all connections on this peer */
	rxrpc_conn_clearall(peer);

	BUG_ON(!list_empty(&peer->conn_active));
	BUG_ON(!list_empty(&peer->conn_graveyard));

	/* inform the application layer */
	if (peer->ops && peer->ops->discarding)
		peer->ops->discarding(peer);

	if (!list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_del(&peer->proc_link);
		up_write(&rxrpc_peers_sem);
	}

	__RXACCT(atomic_dec(&rxrpc_peer_count));
	kfree(peer);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&trans->peer_count))
		wake_up(&trans->peer_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_peer_do_timeout() */
/*****************************************************************************/
/*
 * clear all peer records from a transport endpoint
 */
void rxrpc_peer_clearall(struct rxrpc_transport *trans)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_peer *peer;
	int err;

	_enter("%p", trans);

	/* there shouldn't be any active peers remaining */
	BUG_ON(!list_empty(&trans->peer_active));

	/* manually timeout all peers in the graveyard */
	spin_lock(&trans->peer_gylock);
	while (!list_empty(&trans->peer_graveyard)) {
		peer = list_entry(trans->peer_graveyard.next,
				  struct rxrpc_peer, link);
		_debug("Clearing peer %p\n", peer);
		err = rxrpc_krxtimod_del_timer(&peer->timeout);
		spin_unlock(&trans->peer_gylock);

		if (err == 0)
			rxrpc_peer_do_timeout(peer);

		spin_lock(&trans->peer_gylock);
	}
	spin_unlock(&trans->peer_gylock);

	/* wait for the peer graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&trans->peer_gy_waitq, &myself);

	while (atomic_read(&trans->peer_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&trans->peer_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_peer_clearall() */
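/* Note on the wait loop above: trans->peer_count is re-checked only after the
 * task has set TASK_UNINTERRUPTIBLE and joined peer_gy_waitq, and the state is
 * set again after every schedule(), so the wake_up() issued by
 * rxrpc_peer_do_timeout() when the last graveyard peer is destroyed cannot
 * slip in between the check and the sleep and be missed.
 */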
/*****************************************************************************/
/*
 * calculate and cache the Round-Trip-Time for a message and its response
 */
void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
			      struct rxrpc_message *msg,
			      struct rxrpc_message *resp)
{
	unsigned long long rtt;
	int loop;

	_enter("%p,%p,%p", peer, msg, resp);

	/* calculate the latest RTT */
	rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
	rtt *= 1000000UL;
	rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;

	/* add to cache */
	peer->rtt_cache[peer->rtt_point] = rtt;
	peer->rtt_point++;
	peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;

	if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
		peer->rtt_usage++;

	/* recalculate RTT */
	rtt = 0;
	for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
		rtt += peer->rtt_cache[loop];

	do_div(rtt, peer->rtt_usage);
	peer->rtt = rtt;

	_leave(" RTT=%lu.%lums",
	       (long) (peer->rtt / 1000), (long) (peer->rtt % 1000));

} /* end rxrpc_peer_calculate_rtt() */
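/* Worked example (illustrative figures only): if a request is stamped at
 * 5.000000s and its response at 5.000350s, the new sample is
 * (5 - 5) * 1000000 + (350 - 0) = 350us.  With a cache holding the samples
 * {350, 410, 290} (rtt_usage == 3), the recalculated peer->rtt is
 * (350 + 410 + 290) / 3 = 350us, which _leave() reports as RTT=0.350ms.
 */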