drivers/lguest/lguest_user.c

/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
 * controls and communicates with the Guest. For example, the first write will
 * tell us the Guest's memory layout and entry point. A read will run the
 * Guest until something happens, such as a signal or the Guest doing a NOTIFY
 * out to the Launcher.
:*/
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include "lg.h"

/*L:056
 * Before we move on, let's jump ahead and look at what the kernel does when
 * it needs to look up the eventfds. That will complete our picture of how we
 * use RCU.
 *
 * The notification value is in cpu->pending_notify: we return true if it went
 * to an eventfd.
 */
bool send_notify_to_eventfd(struct lg_cpu *cpu)
{
	unsigned int i;
	struct lg_eventfd_map *map;

	/*
	 * This "rcu_read_lock()" helps track when someone is still looking at
	 * the (RCU-using) eventfds array. It's not actually a lock at all;
	 * indeed it's a noop in many configurations. (You didn't expect me to
	 * explain all the RCU secrets here, did you?)
	 */
	rcu_read_lock();
	/*
	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
	 * makes sure we don't access the memory pointed to by
	 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
	 * but Alpha allows this! Paul McKenney points out that a really
	 * aggressive compiler could have the same effect:
	 *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
	 *
	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
	 */
	map = rcu_dereference(cpu->lg->eventfds);
	/*
	 * Simple array search: even if they add an eventfd while we do this,
	 * we'll continue to use the old array and just won't see the new one.
	 */
	for (i = 0; i < map->num; i++) {
		if (map->map[i].addr == cpu->pending_notify) {
			eventfd_signal(map->map[i].event, 1);
			cpu->pending_notify = 0;
			break;
		}
	}
	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
	rcu_read_unlock();

	/* If we cleared the notification, it's because we found a match. */
	return cpu->pending_notify == 0;
}

/*L:055
 * One of the more tricksy tricks in the Linux Kernel is a technique called
 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
 * about kernel coding, I use it here. (In case you're curious, other purposes
 * include learning about virtualization and instilling a deep appreciation for
 * simplicity and puppies).
 *
 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
 * add new eventfds without ever blocking readers from accessing the array.
 * The current Launcher only does this during boot, so that never happens. But
 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
 * than this code does.
 *
 * We allocate a brand new one-larger array, copy the old one and add our new
 * element. Then we make the lg eventfd pointer point to the new array.
 * That's the easy part: now we need to free the old one, but we need to make
 * sure no slow CPU somewhere is still looking at it. That's what
 * synchronize_rcu does for us: it waits until every CPU has indicated that it
 * has moved on, so we know no one is still using the old array.
 *
 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
 */
static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
{
	struct lg_eventfd_map *new, *old = lg->eventfds;

	/*
	 * We don't allow notifications on value 0 anyway (pending_notify of
	 * 0 means "nothing pending").
	 */
	if (!addr)
		return -EINVAL;

	/*
	 * Replace the old array with the new one, carefully: others can
	 * be accessing it at the same time.
	 */
	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* First make identical copy. */
	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
	new->num = old->num;

	/* Now append new entry. */
	new->map[new->num].addr = addr;
	new->map[new->num].event = eventfd_ctx_fdget(fd);
	if (IS_ERR(new->map[new->num].event)) {
		int err = PTR_ERR(new->map[new->num].event);
		kfree(new);
		return err;
	}
	new->num++;

	/*
	 * Now put the new one in place: rcu_assign_pointer() is a fancy way of
	 * doing "lg->eventfds = new", but it uses memory barriers to make
	 * absolutely sure that the contents of "new" written above are nailed
	 * down before we actually do the assignment.
	 *
	 * We have to think about these kinds of things when we're operating on
	 * live data without locks.
	 */
	rcu_assign_pointer(lg->eventfds, new);

	/*
	 * We're not in a big hurry. Wait until no one's looking at the old
	 * version, then free it.
	 */
	synchronize_rcu();
	kfree(old);

	return 0;
}

/*L:052
 * Receiving notifications from the Guest is usually done by attaching a
 * particular LHCALL_NOTIFY value to an event file descriptor. The eventfd
 * will become readable when the Guest does an LHCALL_NOTIFY with that value.
 *
 * This is really convenient for processing each virtqueue in a separate
 * thread.
 */
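/*
 * Purely as a sketch of the Launcher side (assuming "lguest_fd" is the
 * Launcher's file descriptor for /dev/lguest and "notify_addr" is the address
 * a virtqueue kicks with LHCALL_NOTIFY; the real Launcher may differ in
 * detail), wiring up a virtqueue thread looks roughly like this:
 *
 *	int efd = eventfd(0, 0);
 *	unsigned long args[] = { LHREQ_EVENTFD, notify_addr, efd };
 *
 *	if (efd < 0 || write(lguest_fd, args, sizeof(args)) < 0)
 *		err(1, "Attaching eventfd for %#lx", notify_addr);
 *
 * After this, the service thread can simply block reading "efd" until the
 * Guest notifies on that address.
 */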
static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
{
	unsigned long addr, fd;
	int err;

	if (get_user(addr, input) != 0)
		return -EFAULT;
	input++;
	if (get_user(fd, input) != 0)
		return -EFAULT;

	/*
	 * Just make sure two callers don't add eventfds at once. We really
	 * only need to lock against callers adding to the same Guest, so using
	 * the Big Lguest Lock is overkill. But this is setup, not a fast path.
	 */
	mutex_lock(&lguest_lock);
	err = add_eventfd(lg, addr, fd);
	mutex_unlock(&lguest_lock);

	return err;
}

/*L:050
 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
 * number to /dev/lguest.
 */
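/*
 * Again only as a sketch (assuming "lguest_fd" is open on /dev/lguest), the
 * Launcher side looks something like:
 *
 *	unsigned long buf[] = { LHREQ_IRQ, irq };
 *
 *	if (write(lguest_fd, buf, sizeof(buf)) != 0)
 *		err(1, "Triggering irq %lu", irq);
 *
 * (Note the "!= 0": on success this write hands back user_send_irq()'s
 * return value of 0, not the number of bytes written.)
 */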
static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
{
	unsigned long irq;

	if (get_user(irq, input) != 0)
		return -EFAULT;
	if (irq >= LGUEST_IRQS)
		return -EINVAL;

	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_interrupt(cpu, irq);
	return 0;
}

/*L:040
 * Once our Guest is initialized, the Launcher makes it run by reading
 * from /dev/lguest.
 */
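/*
 * So the heart of the Launcher is just a loop around read(). A sketch
 * (assuming "lguest_fd" is open on /dev/lguest; with a uniprocessor Guest the
 * offset, which selects the virtual CPU, is always 0):
 *
 *	for (;;) {
 *		unsigned long notify_addr;
 *		ssize_t r = pread(lguest_fd, &notify_addr,
 *				  sizeof(notify_addr), 0);
 *
 *		if (r != sizeof(notify_addr))
 *			break;
 *		handle_notify(notify_addr);
 *	}
 *
 * Here handle_notify() is a made-up stand-in for whatever device servicing
 * the Launcher does. If read() fails instead, the Guest has died (or
 * something else went wrong), and another read() will say why, as below.
 */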
static ssize_t read(struct file *file, char __user *user, size_t size, loff_t *o)
{
	struct lguest *lg = file->private_data;
	struct lg_cpu *cpu;
	unsigned int cpu_id = *o;

	/* You must write LHREQ_INITIALIZE first! */
	if (!lg)
		return -EINVAL;

	/* Watch out for arbitrary vcpu indexes! */
	if (cpu_id >= lg->nr_cpus)
		return -EINVAL;

	cpu = &lg->cpus[cpu_id];

	/* If you're not the task which owns the Guest, go away. */
	if (current != cpu->tsk)
		return -EPERM;

	/* If the Guest is already dead, we indicate why. */
	if (lg->dead) {
		size_t len;

		/* lg->dead either contains an error code, or a string. */
		if (IS_ERR(lg->dead))
			return PTR_ERR(lg->dead);

		/* We can only return as much as the buffer they read with. */
		len = min(size, strlen(lg->dead) + 1);
		if (copy_to_user(user, lg->dead, len) != 0)
			return -EFAULT;
		return len;
	}

	/*
	 * If we returned from read() last time because the Guest sent I/O,
	 * clear the flag.
	 */
	if (cpu->pending_notify)
		cpu->pending_notify = 0;

	/* Run the Guest until something interesting happens. */
	return run_guest(cpu, (unsigned long __user *)user);
}

/*L:025
 * This actually initializes a CPU. For the moment, a Guest is only
 * uniprocessor, so "id" is always 0.
 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/*
	 * We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages.
	 */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/*
	 * Now we initialize the Guest's registers, handing it the start
	 * address.
	 */
	lguest_arch_setup_regs(cpu, start_ip);

	/*
	 * We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input).
	 */
	cpu->tsk = current;

	/*
	 * We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up. If we don't keep a
	 * reference, it is destroyed before close() is called.
	 */
	cpu->mm = get_task_mm(cpu->tsk);

	/*
	 * We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice.
	 */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}

/*L:020
 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in
 * addition to the LHREQ_INITIALIZE value). These are:
 *
 * base: The start of the Guest-physical memory inside the Launcher memory.
 *
 * pfnlimit: The highest (Guest-physical) page number the Guest should be
 * allowed to access. The Guest memory lives inside the Launcher, so it sets
 * this to ensure the Guest can only reach its own memory.
 *
 * start: The first instruction to execute ("eip" in x86-speak).
 */
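/*
 * In other words, the very first thing the Launcher does with /dev/lguest is
 * a write like this (a sketch only; "guest_base", "guest_page_limit" and
 * "start_ip" stand for whatever memory layout the Launcher has set up, and
 * "lguest_fd" for its open file descriptor):
 *
 *	unsigned long args[] = { LHREQ_INITIALIZE,
 *				 (unsigned long)guest_base,
 *				 guest_page_limit, start_ip };
 *
 *	if (write(lguest_fd, args, sizeof(args)) < 0)
 *		err(1, "Writing to /dev/lguest");
 */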
static int initialize(struct file *file, const unsigned long __user *input)
{
	/* "struct lguest" contains all we (the Host) know about a Guest. */
	struct lguest *lg;
	int err;
	unsigned long args[3];

	/*
	 * We grab the Big Lguest lock, which protects against multiple
	 * simultaneous initializations.
	 */
	mutex_lock(&lguest_lock);
	/* You can't initialize twice! Close the device and start again... */
	if (file->private_data) {
		err = -EBUSY;
		goto unlock;
	}

	if (copy_from_user(args, input, sizeof(args)) != 0) {
		err = -EFAULT;
		goto unlock;
	}

	lg = kzalloc(sizeof(*lg), GFP_KERNEL);
	if (!lg) {
		err = -ENOMEM;
		goto unlock;
	}

	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
	if (!lg->eventfds) {
		err = -ENOMEM;
		goto free_lg;
	}
	lg->eventfds->num = 0;

	/* Populate the easy fields of our "struct lguest" */
	lg->mem_base = (void __user *)args[0];
	lg->pfn_limit = args[1];

	/* This is the first cpu (cpu 0) and it will start booting at args[2] */
	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
	if (err)
		goto free_eventfds;

	/*
	 * Initialize the Guest's shadow page tables, using the toplevel
	 * address the Launcher gave us. This allocates memory, so can fail.
	 */
	err = init_guest_pagetable(lg);
	if (err)
		goto free_regs;

	/* We keep our "struct lguest" in the file's private_data. */
	file->private_data = lg;

	mutex_unlock(&lguest_lock);

	/* And because this is a write() call, we return the length used. */
	return sizeof(args);

free_regs:
	/* FIXME: This should be in free_vcpu. */
	free_page(lg->cpus[0].regs_page);
free_eventfds:
	kfree(lg->eventfds);
free_lg:
	kfree(lg);
unlock:
	mutex_unlock(&lguest_lock);
	return err;
}

/*L:010
 * The first operation the Launcher does must be a write. All writes
 * start with an unsigned long number: for the first write this must be
 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
 * writes of other values to send interrupts or set up receipt of notifications.
 *
 * Note that we overload the "offset" in the /dev/lguest file to indicate what
 * CPU number we're dealing with. Currently this is always 0 since we only
 * support uniprocessor Guests, but you can see the beginnings of SMP support
 * here.
 */
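/*
 * So, as a sketch (again assuming an open "lguest_fd"), aiming a request at
 * virtual CPU "cpu_id" from the Launcher is just a positioned write:
 *
 *	pwrite(lguest_fd, args, sizeof(args), cpu_id);
 *
 * With only uniprocessor Guests today, cpu_id is always 0.
 */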
static ssize_t write(struct file *file, const char __user *in,
		     size_t size, loff_t *off)
{
	/*
	 * Once the Guest is initialized, we hold the "struct lguest" in the
	 * file private data.
	 */
	struct lguest *lg = file->private_data;
	const unsigned long __user *input = (const unsigned long __user *)in;
	unsigned long req;
	struct lg_cpu *uninitialized_var(cpu);
	unsigned int cpu_id = *off;

	/* The first value tells us what this request is. */
	if (get_user(req, input) != 0)
		return -EFAULT;
	input++;

	/* If you haven't initialized, you must do that first. */
	if (req != LHREQ_INITIALIZE) {
		if (!lg || (cpu_id >= lg->nr_cpus))
			return -EINVAL;
		cpu = &lg->cpus[cpu_id];

		/* Once the Guest is dead, you can only read() why it died. */
		if (lg->dead)
			return -ENOENT;
	}

	switch (req) {
	case LHREQ_INITIALIZE:
		return initialize(file, input);
	case LHREQ_IRQ:
		return user_send_irq(cpu, input);
	case LHREQ_EVENTFD:
		return attach_eventfd(lg, input);
	default:
		return -EINVAL;
	}
}

/*L:060
 * The final piece of interface code is the close() routine. It reverses
 * everything done in initialize(). This is usually called because the
 * Launcher exited.
 *
 * Note that the close routine returns 0 or a negative error number: it can't
 * really fail, but it can whine. I blame Sun for this wart, and K&R C for
 * letting them do it.
 */
static int close(struct inode *inode, struct file *file)
{
	struct lguest *lg = file->private_data;
	unsigned int i;

	/* If we never successfully initialized, there's nothing to clean up. */
	if (!lg)
		return 0;

	/*
	 * We need the big lock, to protect from inter-guest I/O and other
	 * Launchers initializing guests.
	 */
	mutex_lock(&lguest_lock);

	/* Free up the shadow page tables for the Guest. */
	free_guest_pagetable(lg);

	for (i = 0; i < lg->nr_cpus; i++) {
		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
		hrtimer_cancel(&lg->cpus[i].hrt);
		/* We can free up the register page we allocated. */
		free_page(lg->cpus[i].regs_page);
		/*
		 * Now all the memory cleanups are done, it's safe to release
		 * the Launcher's memory management structure.
		 */
		mmput(lg->cpus[i].mm);
	}

	/* Release any eventfds they registered. */
	for (i = 0; i < lg->eventfds->num; i++)
		eventfd_ctx_put(lg->eventfds->map[i].event);
	kfree(lg->eventfds);

	/*
	 * If lg->dead doesn't contain an error code it will be NULL or a
	 * kmalloc()ed string, either of which is ok to hand to kfree().
	 */
	if (!IS_ERR(lg->dead))
		kfree(lg->dead);
	/* Free the memory allocated to the "struct lguest". */
	kfree(lg);
	/* Release lock and exit. */
	mutex_unlock(&lguest_lock);

	return 0;
}

/*L:000
 * Welcome to our journey through the Launcher!
 *
 * The Launcher is the Host userspace program which sets up, runs and services
 * the Guest. In fact, many comments in the Drivers which refer to "the Host"
 * doing things are inaccurate: the Launcher does all the device handling for
 * the Guest, but the Guest can't know that.
 *
 * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
 * shall see more of that later.
 *
 * We begin our understanding with the Host kernel interface which the Launcher
 * uses: reading and writing a character device called /dev/lguest. All the
 * work happens in the read(), write() and close() routines:
 */
static const struct file_operations lguest_fops = {
	.owner	 = THIS_MODULE,
	.release = close,
	.write	 = write,
	.read	 = read,
	.llseek  = default_llseek,
};

/*
 * This is a textbook example of a "misc" character device. Populate a "struct
 * miscdevice" and register it with misc_register().
 */
static struct miscdevice lguest_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "lguest",
	.fops	= &lguest_fops,
};

int __init lguest_device_init(void)
{
	return misc_register(&lguest_dev);
}

void __exit lguest_device_remove(void)
{
	misc_deregister(&lguest_dev);
}