/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
#include "linux/mutex.h"
#include "linux/workqueue.h"
#include "asm/atomic.h"
/* UML-internal headers for __uml_exitcall(), um_request_irq(), os_*() and port_*() */
#include "init.h"
#include "irq_kern.h"
#include "os.h"
#include "port.h"
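
/*
 * A port_list describes one host port being listened on: connections that
 * have been accepted but not yet handed to a console sit on ->pending, and
 * move to ->connections once pipe_interrupt() has received their fd.  A
 * port_dev is the per-console handle returned by port_data(), and a
 * connection represents a single accepted telnetd session.
 */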
struct port_list {
        struct list_head list;
        atomic_t wait_count;
        int has_connection;
        struct completion done;
        int port;
        int fd;
        spinlock_t lock;
        struct list_head pending;
        struct list_head connections;
};

struct port_dev {
        struct port_list *port;
        int helper_pid;
        int telnetd_pid;
};

struct connection {
        struct list_head list;
        int fd;
        int helper_pid;
        int socket[2];
        int telnetd_pid;
        struct port_list *port;
};
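
/*
 * TELNETD_IRQ handler: the helper sends the session's host fd across
 * socket[0]; receive it with os_rcv_fd(), move the connection from the
 * port's pending list to its connections list and wake port_wait().
 */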
static irqreturn_t pipe_interrupt(int irq, void *data)
{
        struct connection *conn = data;
        int fd;

        fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
        if (fd < 0) {
                if (fd == -EAGAIN)
                        return IRQ_NONE;

                printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
                       -fd);
                os_close_file(conn->fd);
        }

        list_del(&conn->list);

        conn->fd = fd;
        list_add(&conn->list, &conn->port->connections);

        complete(&conn->port->done);
        return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"
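
/*
 * Accept one connection on the port's listening fd.  The connection is
 * allocated, its socketpair is wired to pipe_interrupt() via TELNETD_IRQ
 * and it is queued on ->pending.  NO_WAITER_MSG is written out when no
 * console is currently sitting in port_wait().  Returns 1 if a connection
 * was accepted, 0 otherwise.
 */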
static int port_accept(struct port_list *port)
{
        struct connection *conn;
        int fd, socket[2], pid;

        fd = port_connection(port->fd, socket, &pid);
        if (fd < 0) {
                if (fd != -EAGAIN)
                        printk(KERN_ERR "port_accept : port_connection "
                               "returned %d\n", -fd);
                goto out;
        }

        conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
        if (conn == NULL) {
                printk(KERN_ERR "port_accept : failed to allocate "
                       "connection\n");
                goto out_close;
        }
        *conn = ((struct connection)
                { .list         = LIST_HEAD_INIT(conn->list),
                  .fd           = fd,
                  .socket       = { socket[0], socket[1] },
                  .telnetd_pid  = pid,
                  .port         = port });

        if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
                           IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
                           "telnetd", conn)) {
                printk(KERN_ERR "port_accept : failed to get IRQ for "
                       "telnetd\n");
                goto out_free;
        }

        if (atomic_read(&port->wait_count) == 0) {
                os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
                printk(KERN_ERR "No one waiting for port\n");
        }
        list_add(&conn->list, &port->pending);
        return 1;

 out_free:
        kfree(conn);
 out_close:
        os_close_file(fd);
        os_kill_process(pid, 1);
 out:
        return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);
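
/*
 * Deferred accept work, scheduled by port_interrupt(): walk every
 * registered port, re-enable the listening fd of any port flagged as
 * having activity and accept all of its pending connections.
 */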
static void port_work_proc(struct work_struct *unused)
{
        struct port_list *port;
        struct list_head *ele;
        unsigned long flags;

        local_irq_save(flags);
        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                if (!port->has_connection)
                        continue;

                reactivate_fd(port->fd, ACCEPT_IRQ);
                while (port_accept(port))
                        ;
                port->has_connection = 0;
        }
        local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);
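
/* ACCEPT_IRQ handler: mark the port and defer the real work to port_work. */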
static irqreturn_t port_interrupt(int irq, void *data)
{
        struct port_list *port = data;

        port->has_connection = 1;
        schedule_work(&port_work);
        return IRQ_HANDLED;
}
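
/*
 * Look up (or create, binding the host port and registering ACCEPT_IRQ)
 * the port_list for port_num and return a new port_dev referring to it,
 * or NULL on failure.
 */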
void *port_data(int port_num)
{
        struct list_head *ele;
        struct port_list *port;
        struct port_dev *dev = NULL;
        int fd;

        mutex_lock(&ports_mutex);
        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                if (port->port == port_num)
                        goto found;
        }

        port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
        if (port == NULL) {
                printk(KERN_ERR "Allocation of port list failed\n");
                goto out;
        }

        fd = port_listen_fd(port_num);
        if (fd < 0) {
                printk(KERN_ERR "binding to port %d failed, errno = %d\n",
                       port_num, -fd);
                goto out_free;
        }

        if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
                           IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
                           "port", port)) {
                printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
                goto out_close;
        }

        *port = ((struct port_list)
                { .list           = LIST_HEAD_INIT(port->list),
                  .wait_count     = ATOMIC_INIT(0),
                  .has_connection = 0,
                  .port           = port_num,
                  .fd             = fd,
                  .pending        = LIST_HEAD_INIT(port->pending),
                  .connections    = LIST_HEAD_INIT(port->connections) });
        spin_lock_init(&port->lock);
        init_completion(&port->done);
        list_add(&port->list, &ports);

 found:
        dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
        if (dev == NULL) {
                printk(KERN_ERR "Allocation of port device entry failed\n");
                goto out;
        }

        *dev = ((struct port_dev) { .port        = port,
                                    .helper_pid  = -1,
                                    .telnetd_pid = -1 });
        goto out;

 out_close:
        os_close_file(fd);
 out_free:
        kfree(port);
 out:
        mutex_unlock(&ports_mutex);
        return dev;
}
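
/*
 * Block until a connection is available on this device's port and return
 * its host fd (negative on error).  ->wait_count lets port_accept() know
 * that a console is actually waiting.
 */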
int port_wait(void *data)
{
        struct port_dev *dev = data;
        struct connection *conn;
        struct port_list *port = dev->port;
        int fd;

        atomic_inc(&port->wait_count);
        while (1) {
                fd = -ERESTARTSYS;
                if (wait_for_completion_interruptible(&port->done))
                        goto out;

                spin_lock(&port->lock);

                conn = list_entry(port->connections.next, struct connection,
                                  list);
                list_del(&conn->list);
                spin_unlock(&port->lock);

                os_shutdown_socket(conn->socket[0], 1, 1);
                os_close_file(conn->socket[0]);
                os_shutdown_socket(conn->socket[1], 1, 1);
                os_close_file(conn->socket[1]);

                /*
                 * This is done here because freeing an IRQ can't be done
                 * within the IRQ handler.  So, pipe_interrupt always
                 * completes ->done regardless of whether it got a
                 * successful connection.  Then we loop here throwing out
                 * failed connections until a good one is found.
                 */
                free_irq(TELNETD_IRQ, conn);

                if (conn->fd >= 0)
                        break;

                os_close_file(conn->fd);
                kfree(conn);
        }

        fd = conn->fd;
        dev->helper_pid = conn->helper_pid;
        dev->telnetd_pid = conn->telnetd_pid;
        kfree(conn);

 out:
        atomic_dec(&port->wait_count);
        return fd;
}
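
/* Kill the helper and telnetd processes associated with this device. */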
void port_remove_dev(void *d)
{
        struct port_dev *dev = d;

        if (dev->helper_pid != -1)
                os_kill_process(dev->helper_pid, 0);
        if (dev->telnetd_pid != -1)
                os_kill_process(dev->telnetd_pid, 1);
        dev->helper_pid = -1;
        dev->telnetd_pid = -1;
}

void port_kern_free(void *d)
{
        struct port_dev *dev = d;

        port_remove_dev(dev);
        kfree(dev);
}
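
/* Exitcall: release the IRQ and host fd of every port that was opened. */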
static void free_port(void)
{
        struct list_head *ele;
        struct port_list *port;

        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                free_irq_by_fd(port->fd);
                os_close_file(port->fd);
        }
}

__uml_exitcall(free_port);