/*P:300 The I/O mechanism in lguest is simple yet flexible, allowing the Guest
 * to talk to the Launcher or directly to another Guest.  It uses familiar
 * concepts of DMA and interrupts, plus some neat code stolen from
 * futexes... */
/* Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/types.h>
#include <linux/futex.h>
#include <linux/jhash.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include "lg.h"
/*
 * Getting data in and out of the Guest is quite an art.  There are numerous
 * ways to do it, and they all suck differently.  We try to keep things fairly
 * close to "real" hardware so our Guest's drivers don't look like an alien
 * visitation in the middle of the Linux code, and yet make sure that Guests
 * can talk directly to other Guests, not just the Launcher.
 *
 * To do this, the Guest gives us a key when it binds or sends DMA buffers.
 * The key corresponds to a "physical" address inside the Guest (ie. a virtual
 * address inside the Launcher process).  We don't, however, use this key
 * directly.
 *
 * We want Guests which share memory to be able to DMA to each other: two
 * Launchers can mmap the same file, and then their Guests can communicate.
 * Fortunately, the futex code provides us with a way to get a "union
 * futex_key" corresponding to the memory lying at a virtual address: if the
 * two processes share memory, the "union futex_key" for that memory will match
 * even if the memory is mapped at different addresses in each.  So we always
 * convert the keys to "union futex_key"s to compare them.
 *
 * Before we dive into this though, we need to look at another set of helper
 * routines used throughout the Host kernel code to access Guest memory.
 */
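/* For orientation: the "union futex_key" we borrow from the futex code looks
 * roughly like the sketch below (the authoritative definition lives in
 * <linux/futex.h>; treat the exact field names here as illustrative).  The
 * "both" view is what our hash() and key_eq() below poke at, and the futex
 * code sets the low bit of "offset" to mark a shared mapping:
 *
 *	union futex_key {
 *		struct { unsigned long pgoff; struct inode *inode;
 *			 int offset; } shared;
 *		struct { unsigned long address; struct mm_struct *mm;
 *			 int offset; } private;
 *		struct { unsigned long word; void *ptr;
 *			 int offset; } both;
 *	};
 */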
static struct list_head dma_hash[61];
/* An unfortunate side effect of the Linux doubly-linked list implementation is
 * that there's no good way to statically initialize an array of linked
 * lists. */
void lguest_io_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dma_hash); i++)
		INIT_LIST_HEAD(&dma_hash[i]);
}
/* FIXME: allow multi-page lengths. */
static int check_dma_list(struct lguest *lg, const struct lguest_dma *dma)
{
	unsigned int i;

	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
		/* A zero length marks the end of the section array. */
		if (!dma->len[i])
			return 1;
		/* Each section must lie within the Guest's memory. */
		if (!lguest_address_ok(lg, dma->addr[i], dma->len[i]))
			goto kill;
		if (dma->len[i] > PAGE_SIZE)
			goto kill;
		/* We could do over a page, but is it worth it? */
		if ((dma->addr[i] % PAGE_SIZE) + dma->len[i] > PAGE_SIZE)
			goto kill;
	}
	return 1;

kill:
	kill_guest(lg, "bad DMA entry: %u@%#lx", dma->len[i], dma->addr[i]);
	return 0;
}
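/* A reminder of what we were just checking: a "struct lguest_dma" describes a
 * scatter-gather buffer as parallel arrays of addresses and lengths, plus a
 * "used_len" which the receiving side fills in once data has arrived.  As a
 * rough, illustrative sketch (the real definition lives in the lguest
 * headers):
 *
 *	struct lguest_dma {
 *		u32 used_len;
 *		unsigned long addr[LGUEST_MAX_DMA_SECTIONS];
 *		u16 len[LGUEST_MAX_DMA_SECTIONS];
 *	};
 *
 * A zero length ends the array early, which is why check_dma_list() above and
 * the copy loops below stop at the first len[] entry of 0. */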
/*L:330 This is our hash function, using the wonderful Jenkins hash.
 *
 * The futex key is a union with three parts: an unsigned long word, a pointer,
 * and an int "offset".  We could use jhash_2words() which takes three u32s.
 * (Ok, the hash functions are great: the naming sucks though).
 *
 * It's nice to be portable to 64-bit platforms, so we use the more generic
 * jhash2(), which takes an array of u32, the number of u32s, and an initial
 * u32 to roll in.  This is uglier, but breaks down to almost the same code on
 * 32-bit platforms like this one.
 *
 * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61).
 */
static unsigned int hash(const union futex_key *key)
{
	return jhash2((u32 *)&key->both.word,
		      (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
		      key->both.offset)
		% ARRAY_SIZE(dma_hash);
}
/* This is a convenience routine to compare two keys.  It's a much bemoaned C
 * weakness that it doesn't allow '==' on structures or unions, so we have to
 * open-code it like this. */
static inline int key_eq(const union futex_key *a, const union futex_key *b)
{
	return (a->both.word == b->both.word
		&& a->both.ptr == b->both.ptr
		&& a->both.offset == b->both.offset);
}
/*L:360 OK, when we need to actually free up a Guest's DMA array we do several
 * things, so we have a convenient function to do it.
 *
 * The caller must hold a read lock on dmainfo owner's current->mm->mmap_sem
 * for the drop_futex_key_refs(). */
static void unlink_dma(struct lguest_dma_info *dmainfo)
{
	/* You locked this too, right? */
	BUG_ON(!mutex_is_locked(&lguest_lock));
	/* This is how we know that the entry is free. */
	dmainfo->interrupt = 0;
	/* Remove it from the hash table. */
	list_del(&dmainfo->list);
	/* Drop the references we were holding (to the inode or mm). */
	drop_futex_key_refs(&dmainfo->key);
}
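/* The Host-side bookkeeping which unlink_dma() just tore down is a "struct
 * lguest_dma_info".  Roughly sketched (the real definition is in "lg.h", so
 * take the field names here as illustrative), it records everything
 * bind_dma() below fills in:
 *
 *	struct lguest_dma_info {
 *		struct list_head list;		// entry in dma_hash[]
 *		union futex_key key;		// what the Guest's key maps to
 *		unsigned long dmas;		// Guest address of the array
 *		u16 next_dma, num_dmas;		// cursor and array size
 *		u16 guestid;			// who registered it
 *		u8 interrupt;			// 0 means "entry unused"
 *	};
 */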
/*L:350 This is the routine which we call when the Guest asks to unregister a
 * DMA array attached to a given key.  Returns true if the array was found. */
static int unbind_dma(struct lguest *lg,
		      const union futex_key *key,
		      unsigned long dmas)
{
	int i, ret = 0;

	/* We don't bother with the hash table, just look through all this
	 * Guest's DMA arrays. */
	for (i = 0; i < LGUEST_MAX_DMA; i++) {
		/* In theory it could have more than one array on the same key,
		 * or one array on multiple keys, so we check both. */
		if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
			unlink_dma(&lg->dma[i]);
			ret = 1;
			break;
		}
	}
	return ret;
}
/*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct
 * lguest_dma" for receiving I/O.
 *
 * The Guest wants to bind an array of "struct lguest_dma"s to a particular key
 * to receive input.  This only happens when the Guest is setting up a new
 * device, so it doesn't have to be very fast.
 *
 * It returns 1 on a successful registration (it can fail if we hit the limit
 * of registrations for this Guest). */
int bind_dma(struct lguest *lg,
	     unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
{
	unsigned int i;
	int ret = 0;
	union futex_key key;
	/* Futex code needs the mmap_sem. */
	struct rw_semaphore *fshared = &current->mm->mmap_sem;

	/* Invalid interrupt?  (We could kill the Guest here). */
	if (interrupt >= LGUEST_IRQS)
		return 0;

	/* We need to grab the Big Lguest Lock, because other Guests may be
	 * trying to look through this Guest's DMAs to send something while
	 * we're doing this. */
	mutex_lock(&lguest_lock);
	down_read(fshared);
	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
		kill_guest(lg, "bad dma key %#lx", ukey);
		goto unlock;
	}

	/* We want to keep this key valid once we drop mmap_sem, so we have to
	 * hold a reference. */
	get_futex_key_refs(&key);

	/* If the Guest specified an interrupt of 0, that means they want to
	 * unregister this array of "struct lguest_dma"s. */
	if (interrupt == 0)
		ret = unbind_dma(lg, &key, dmas);
	else {
		/* Look through this Guest's dma array for an unused entry. */
		for (i = 0; i < LGUEST_MAX_DMA; i++) {
			/* If the interrupt is non-zero, the entry is already
			 * in use. */
			if (lg->dma[i].interrupt)
				continue;

			/* OK, a free one!  Fill in our details. */
			lg->dma[i].dmas = dmas;
			lg->dma[i].num_dmas = numdmas;
			lg->dma[i].next_dma = 0;
			lg->dma[i].key = key;
			lg->dma[i].guestid = lg->guestid;
			lg->dma[i].interrupt = interrupt;

			/* Now we add it to the hash table: the position
			 * depends on the futex key that we got. */
			list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);

			ret = 1;
			goto unlock;
		}
	}
	/* If we didn't find a slot to put the key in, drop the reference
	 * again. */
	drop_futex_key_refs(&key);
unlock:
	/* Unlock and out. */
	up_read(fshared);
	mutex_unlock(&lguest_lock);
	return ret;
}
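/* For context, this is roughly what the other end of the BIND_DMA hypercall
 * looks like.  A Guest driver which wants to receive input builds an array of
 * "struct lguest_dma"s and hands the key and the array to the Host; the exact
 * argument packing below is an assumption for illustration, not the
 * authoritative Guest API (that lives in the Guest-side lguest code):
 *
 *	struct lguest_dma dmas[NUM_BUFS];	// used_len == 0 means "free"
 *	// ...fill in dmas[n].addr[] and dmas[n].len[]...
 *	hcall(LHCALL_BIND_DMA, key_phys, __pa(dmas),
 *	      (NUM_BUFS << 8) | irq);		// count and irq share one arg
 *
 * Binding the same key and array again with an interrupt of 0 is how the
 * Guest unregisters, as unbind_dma() above handles. */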
/*L:385 Note that our routines to access a different Guest's memory are called
 * lgread_other() and lgwrite_other(): these names emphasize that they are only
 * used when the Guest is *not* the current Guest.
 *
 * The interface for copying from another process's memory is called
 * access_process_vm(), with a final argument of 0 for a read, and 1 for a
 * write.
 *
 * We need lgread_other() to read the destination Guest's "struct lguest_dma"
 * array. */
static int lgread_other(struct lguest *lg,
			void *buf, u32 addr, unsigned bytes)
{
	if (!lguest_address_ok(lg, addr, bytes)
	    || access_process_vm(lg->tsk, addr, buf, bytes, 0) != bytes) {
		memset(buf, 0, bytes);
		kill_guest(lg, "bad address in registered DMA struct");
		return 0;
	}
	return 1;
}
258 /* "lgwrite()" to another Guest: used to update the destination "used_len" once
259 * we've transferred data into the buffer. */
260 static int lgwrite_other(struct lguest
*lg
, u32 addr
,
261 const void *buf
, unsigned bytes
)
263 if (!lguest_address_ok(lg
, addr
, bytes
)
264 || (access_process_vm(lg
->tsk
, addr
, (void *)buf
, bytes
, 1)
266 kill_guest(lg
, "bad address writing to registered DMA");
/*L:400 This is the generic engine which copies from a source "struct
 * lguest_dma" from this Guest into another Guest's "struct lguest_dma".  The
 * destination Guest's pages have already been mapped, as contained in the
 * pages array.
 *
 * If you're wondering if there's a nice "copy from one process to another"
 * routine, so was I.  But Linux isn't really set up to copy between two
 * unrelated processes, so we have to write it ourselves. */
static u32 copy_data(struct lguest *srclg,
		     const struct lguest_dma *src,
		     const struct lguest_dma *dst,
		     struct page *pages[])
{
	unsigned int totlen, si, di, srcoff, dstoff;
	void *maddr = NULL;

	/* We return the total length transferred. */
	totlen = 0;

	/* We keep indexes into the source and destination "struct lguest_dma",
	 * and an offset within each region. */
	si = di = 0;
	srcoff = dstoff = 0;

	/* We loop until the source or destination is exhausted. */
	while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
	       && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
		/* We can only transfer the rest of the src buffer, or as much
		 * as will fit into the destination buffer. */
		u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);

		/* For systems using "highmem" we need to use kmap() to access
		 * the page we want.  We often use the same page over and over,
		 * so rather than kmap() it on every loop, we set the maddr
		 * pointer to NULL when we need to move to the next
		 * destination page. */
		if (!maddr)
			maddr = kmap(pages[di]);

		/* Copy directly from (this Guest's) source address to the
		 * destination Guest's kmap()ed buffer.  Note that maddr points
		 * to the start of the page: we need to add the offset of the
		 * destination address and offset within the buffer. */

		/* FIXME: This is not completely portable.  I looked at
		 * copy_to_user_page(), and some arch's seem to need special
		 * flushes.  x86 is fine. */
		if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
				   (void __user *)src->addr[si] + srcoff,
				   len) != 0) {
			/* If a copy failed, it's the source's fault. */
			kill_guest(srclg, "bad address in sending DMA");
			totlen = 0;
			break;
		}

		/* Increment the total and src & dst offsets */
		totlen += len;
		srcoff += len;
		dstoff += len;

		/* Presumably we reached the end of the src or dest buffers: */
		if (srcoff == src->len[si]) {
			/* Move to the next buffer at offset 0 */
			si++;
			srcoff = 0;
		}
		if (dstoff == dst->len[di]) {
			/* We need to unmap that destination page and reset
			 * maddr ready for the next one. */
			kunmap(pages[di]);
			maddr = NULL;
			di++;
			dstoff = 0;
		}
	}

	/* If we still had a page mapped at the end, unmap now. */
	if (maddr)
		kunmap(pages[di]);

	return totlen;
}
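/* A small worked example of the loop above: say the source has sections of
 * 1000 and 50 bytes, and the destination has two 600-byte sections.  The
 * copies proceed in min()-sized chunks:
 *
 *	600 bytes:  src[0] offset 0   -> dst[0] offset 0	(dst[0] full)
 *	400 bytes:  src[0] offset 600 -> dst[1] offset 0	(src[0] done)
 *	 50 bytes:  src[1] offset 0   -> dst[1] offset 400	(src done)
 *
 * for a returned total of 1050 bytes; the last 150 bytes of dst[1] simply go
 * unused. */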
/*L:390 This is how we transfer a "struct lguest_dma" from the source Guest
 * (the current Guest which called SEND_DMA) to another Guest. */
static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
		  struct lguest *dstlg, const struct lguest_dma *dst)
{
	int i;
	u32 ret;
	struct page *pages[LGUEST_MAX_DMA_SECTIONS];

	/* We check that both source and destination "struct lguest_dma"s are
	 * within the bounds of the source and destination Guests. */
	if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
		return 0;

	/* We need to map the pages which correspond to each part of the
	 * destination buffer. */
	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
		if (dst->len[i] == 0)
			break;
		/* get_user_pages() is a complicated function, especially since
		 * we only want a single page.  But it works, and returns the
		 * number of pages.  Note that we're holding the destination's
		 * mmap_sem, as get_user_pages() requires. */
		if (get_user_pages(dstlg->tsk, dstlg->mm,
				   dst->addr[i], 1, 1, 1, pages+i, NULL)
		    != 1) {
			/* This means the destination gave us a bogus buffer */
			kill_guest(dstlg, "Error mapping DMA pages");
			ret = 0;
			goto drop_pages;
		}
	}

	/* Now copy the data until we run out of src or dst. */
	ret = copy_data(srclg, src, dst, pages);

drop_pages:
	/* Release every destination page we mapped above. */
	while (--i >= 0)
		put_page(pages[i]);
	return ret;
}
/*L:380 Transferring data from one Guest to another is not as simple as I'd
 * like.  Once we've found the "struct lguest_dma_info" bound to the same
 * address as the send, we need to copy into one of its buffers.
 *
 * This function returns true if the destination array was empty. */
static int dma_transfer(struct lguest *srclg,
			unsigned long udma,
			struct lguest_dma_info *dst)
{
	struct lguest_dma dst_dma, src_dma;
	struct lguest *dstlg;
	u32 i, dma = 0;

	/* From the "struct lguest_dma_info" we found in the hash, grab the
	 * destination Guest. */
	dstlg = &lguests[dst->guestid];
	/* Read in the source "struct lguest_dma" handed to SEND_DMA. */
	lgread(srclg, &src_dma, udma, sizeof(src_dma));

	/* We need the destination's mmap_sem, and we already hold the source's
	 * mmap_sem for the futex key lookup.  Normally this would suggest that
	 * we could deadlock if the destination Guest was trying to send to
	 * this source Guest at the same time, which is another reason that all
	 * I/O is done under the big lguest_lock. */
	down_read(&dstlg->mm->mmap_sem);

	/* Look through the destination DMA array for an available buffer. */
	for (i = 0; i < dst->num_dmas; i++) {
		/* We keep a "next_dma" pointer which often helps us avoid
		 * looking at lots of previously-filled entries. */
		dma = (dst->next_dma + i) % dst->num_dmas;
		if (!lgread_other(dstlg, &dst_dma,
				  dst->dmas + dma * sizeof(struct lguest_dma),
				  sizeof(dst_dma)))
			goto fail;
		/* An unused buffer has a zero "used_len". */
		if (!dst_dma.used_len)
			break;
	}

	/* If we found a buffer, we do the actual data copy. */
	if (i != dst->num_dmas) {
		unsigned long used_lenp;
		unsigned int ret;

		ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
		/* Put used length in the source "struct lguest_dma"'s used_len
		 * field.  It's a little tricky to figure out where that is,
		 * though. */
		lgwrite_u32(srclg,
			    udma+offsetof(struct lguest_dma, used_len), ret);
		/* Transferring 0 bytes is OK if the source buffer was empty. */
		if (ret == 0 && src_dma.len[0] != 0)
			goto fail;

		/* The destination Guest might be running on a different CPU:
		 * we have to make sure that it will see the "used_len" field
		 * change to non-zero *after* it sees the data we copied into
		 * the buffer.  Hence a write memory barrier. */
		wmb();
		/* Figuring out where the destination's used_len field for this
		 * "struct lguest_dma" in the array is also a little ugly. */
		used_lenp = dst->dmas
			+ dma * sizeof(struct lguest_dma)
			+ offsetof(struct lguest_dma, used_len);
		lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
		/* Move the cursor for next time. */
		dst->next_dma = dma + 1;
	}
	up_read(&dstlg->mm->mmap_sem);

	/* We trigger the destination interrupt, even if the destination was
	 * empty and we didn't transfer anything: this gives them a chance to
	 * wake up and refill. */
	set_bit(dst->interrupt, dstlg->irqs_pending);
	/* Wake up the destination process. */
	wake_up_process(dstlg->tsk);
	/* If we passed the last "struct lguest_dma", the receive had no
	 * buffers available. */
	return i == dst->num_dmas;

fail:
	up_read(&dstlg->mm->mmap_sem);
	return 0;
}
/*L:370 This is the counterpart to the BIND_DMA hypercall: the SEND_DMA
 * hypercall.  We find out who's listening, and send to them. */
void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
{
	union futex_key key;
	int empty = 0;
	struct rw_semaphore *fshared = &current->mm->mmap_sem;

again:
	mutex_lock(&lguest_lock);
	down_read(fshared);
	/* Get the futex key for the key the Guest gave us */
	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
		kill_guest(lg, "bad sending DMA key");
		goto unlock;
	}
	/* Since the key must be a multiple of 4, the futex key uses the lower
	 * bit of the "offset" field (which would always be 0) to indicate a
	 * mapping which is shared with other processes (ie. Guests). */
	if (key.shared.offset & 1) {
		struct lguest_dma_info *i;
		/* Look through the hash for other Guests. */
		list_for_each_entry(i, &dma_hash[hash(&key)], list) {
			/* Don't send to ourselves. */
			if (i->guestid == lg->guestid)
				continue;
			if (!key_eq(&key, &i->key))
				continue;

			/* If dma_transfer() tells us the destination has no
			 * available buffers, we increment "empty". */
			empty += dma_transfer(lg, udma, i);
			break;
		}

		/* If the destination is empty, we release our locks and
		 * give the destination Guest a brief chance to restock. */
		if (empty == 1) {
			/* Give any recipients one chance to restock. */
			up_read(&current->mm->mmap_sem);
			mutex_unlock(&lguest_lock);
			/* Next time, we won't try again. */
			empty++;
			goto again;
		}
	} else {
		/* Private mapping: Guest is sending to its Launcher.  We set
		 * the "dma_is_pending" flag so that the main loop will exit
		 * and the Launcher's read() from /dev/lguest will return. */
		lg->dma_is_pending = 1;
		lg->pending_dma = udma;
		lg->pending_key = ukey;
	}
unlock:
	up_read(fshared);
	mutex_unlock(&lguest_lock);
}
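/* Again for context, the sending side in the Guest is conceptually just the
 * following (hedged: the exact wrapper and hypercall argument convention live
 * in the Guest-side lguest code, so treat this as an illustrative sketch):
 *
 *	struct lguest_dma dma;
 *	dma.used_len = 0;
 *	dma.addr[0] = __pa(buf);
 *	dma.len[0] = len;
 *	dma.len[1] = 0;			// terminate the section list
 *	hcall(LHCALL_SEND_DMA, key_phys, __pa(&dma), 0);
 *
 * If the key is backed by a shared mapping we end up in the hash walk above;
 * if it is private, the Launcher is woken instead via "dma_is_pending". */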
void release_all_dma(struct lguest *lg)
{
	unsigned int i;

	BUG_ON(!mutex_is_locked(&lguest_lock));

	down_read(&lg->mm->mmap_sem);
	for (i = 0; i < LGUEST_MAX_DMA; i++) {
		if (lg->dma[i].interrupt)
			unlink_dma(&lg->dma[i]);
	}
	up_read(&lg->mm->mmap_sem);
}
/*M:007 We only return a single DMA buffer to the Launcher, but it would be
 * more efficient to return a pointer to the entire array of DMA buffers, which
 * it can cache and choose one whenever it wants.
 *
 * Currently the Launcher uses a write to /dev/lguest, and the return value is
 * the address of the DMA structure with the interrupt number placed in
 * dma->used_len.  If we wanted to return the entire array, we would need to
 * return the address, array size and interrupt number: this seems to require
 * an ioctl(). */
/*L:320 This routine looks for a DMA buffer registered by the Guest on the
 * given key (using the BIND_DMA hypercall). */
unsigned long get_dma_buffer(struct lguest *lg,
			     unsigned long ukey, unsigned long *interrupt)
{
	unsigned long ret = 0;
	union futex_key key;
	struct lguest_dma_info *i;
	struct rw_semaphore *fshared = &current->mm->mmap_sem;

	/* Take the Big Lguest Lock to stop other Guests sending this Guest DMA
	 * at the same time. */
	mutex_lock(&lguest_lock);
	/* To match between Guests sharing the same underlying memory we steal
	 * code from the futex infrastructure.  This requires that we hold the
	 * "mmap_sem" for our process (the Launcher), and pass it to the futex
	 * code. */
	down_read(fshared);

	/* This can fail if it's not a valid address, or if the address is not
	 * divisible by 4 (the futex code needs that, we don't really). */
	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
		kill_guest(lg, "bad registered DMA buffer");
		goto unlock;
	}
	/* Search the hash table for matching entries (the Launcher can only
	 * send to its own Guest for the moment, so the entry must be for this
	 * Guest). */
	list_for_each_entry(i, &dma_hash[hash(&key)], list) {
		if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
			unsigned int j;
			/* Look through the registered DMA array for an
			 * available buffer. */
			for (j = 0; j < i->num_dmas; j++) {
				struct lguest_dma dma;

				ret = i->dmas + j * sizeof(struct lguest_dma);
				lgread(lg, &dma, ret, sizeof(dma));
				if (dma.used_len == 0)
					break;
			}
			/* Store the interrupt the Guest wants when the buffer
			 * is used. */
			*interrupt = i->interrupt;
			break;
		}
	}
unlock:
	up_read(fshared);
	mutex_unlock(&lguest_lock);
	return ret;
}
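/* The Launcher reaches this routine through a write() on /dev/lguest, as the
 * M:007 note above says.  Very roughly (the command name and buffer layout
 * here are an assumption for illustration, not a documented ABI):
 *
 *	unsigned long args[] = { LHREQ_GETDMA, key };
 *	long udma = write(lguest_fd, args, sizeof(args));
 *	// udma is now the Guest address of a free "struct lguest_dma",
 *	// with the bound interrupt number stashed in dma->used_len.
 */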
/*L:410 This really does complete the Launcher.  Not only have we now finished
 * the longest chapter in our journey, but this also means we are over halfway
 * through!
 *
 * Enough prevaricating around the bush: it is time for us to dive into the
 * core of the Host, in "make Host". */