/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/call.h>
#include "internal.h"	/* assumed to declare _debug(), _enter()/_leave() and
			 * rxrpc_discard_my_signals() used below */
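
/*
 * Shared state for the krxiod daemon: a wait queue the daemon sleeps on, a
 * completion used to synchronise shutdown, a count of outstanding work items
 * across both work queues, the transport and call work queues with their
 * spinlocks, and a flag telling the daemon to exit.
 */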
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
static DECLARE_COMPLETION(rxrpc_krxiod_dead);

static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);

static LIST_HEAD(rxrpc_krxiod_transportq);
static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);

static LIST_HEAD(rxrpc_krxiod_callq);
static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);

static volatile int rxrpc_krxiod_die;

/*****************************************************************************/
/*
 * Rx I/O daemon
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod, current);

	printk("Started krxiod %d\n", current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
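		/* Each pass: sleep until there is queued work, we are asked
		 * to die or a signal arrives; then service at most one
		 * transport and one call before going round again. */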
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage) > 0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");
		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */

/*****************************************************************************/
/*
 * start up a krxiod daemon
 */
int __init rxrpc_krxiod_init(void)
{
	return kernel_thread(rxrpc_krxiod, NULL, 0);

} /* end rxrpc_krxiod_init() */
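
/*
 * Note: rxrpc_krxiod_kill() below assumes the daemon thread was created
 * successfully; if kernel_thread() failed here, waiting on the completion
 * would never return.
 */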

/*****************************************************************************/
/*
 * kill the krxiod daemon and wait for it to complete
 */
void rxrpc_krxiod_kill(void)
{
	rxrpc_krxiod_die = 1;
	wake_up_all(&rxrpc_krxiod_sleepq);
	wait_for_completion(&rxrpc_krxiod_dead);

} /* end rxrpc_krxiod_kill() */
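
/*
 * The queueing helpers below use list_empty() on an item's queue link as the
 * "already queued" test, which is why every removal goes through
 * list_del_init().  An item is only added while its usage count is still
 * positive, so the daemon can safely take a reference once it has popped the
 * item off the queue.
 */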

/*****************************************************************************/
/*
 * queue a transport for attention by krxiod
 */
void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	if (list_empty(&trans->krxiodq_link)) {
		spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);

		if (list_empty(&trans->krxiodq_link)) {
			if (atomic_read(&trans->usage) > 0) {
				list_add_tail(&trans->krxiodq_link,
					      &rxrpc_krxiod_transportq);
				atomic_inc(&rxrpc_krxiod_qcount);
			}
		}

		spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
		wake_up_all(&rxrpc_krxiod_sleepq);
	}

	_leave("");

} /* end rxrpc_krxiod_queue_transport() */

/*****************************************************************************/
/*
 * dequeue a transport from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
	if (!list_empty(&trans->krxiodq_link)) {
		list_del_init(&trans->krxiodq_link);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);

	_leave("");

} /* end rxrpc_krxiod_dequeue_transport() */

/*****************************************************************************/
/*
 * queue a call for attention by krxiod
 */
void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	if (list_empty(&call->rcv_krxiodq_lk)) {
		spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
		if (atomic_read(&call->usage) > 0) {
			list_add_tail(&call->rcv_krxiodq_lk,
				      &rxrpc_krxiod_callq);
			atomic_inc(&rxrpc_krxiod_qcount);
		}
		spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
	}
	wake_up_all(&rxrpc_krxiod_sleepq);

} /* end rxrpc_krxiod_queue_call() */

/*****************************************************************************/
/*
 * dequeue a call from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
	if (!list_empty(&call->rcv_krxiodq_lk)) {
		list_del_init(&call->rcv_krxiodq_lk);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);

} /* end rxrpc_krxiod_dequeue_call() */