/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;

static union {
	struct grant_entry_v1 *v1;
	void *addr;
} gnttab_shared;
/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter is used to store the grant table address when the grant
	 * table is being set up, and nr_gframes is the number of grant table
	 * frames to map. Returning GNTST_okay means success; a negative
	 * value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for accessing or transferring. The ref
	 * parameter is the reference of this introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be granted,
	 * and flags is the status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for accessing. The ref
	 * parameter is the reference of a grant entry whose grant access will
	 * be stopped; readonly is not used in this function. If the grant
	 * entry is currently mapped for reading or writing, just return
	 * failure (== 0) directly and don't tear down the grant access.
	 * Otherwise, stop grant access for this entry and return
	 * success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of a grant entry whose grant transfer
	 * will be stopped. If the transfer has not started, just reclaim the
	 * grant entry and return failure (== 0). Otherwise, wait for the
	 * transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (writing/reading) can be
	 * extracted from the return value by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};
static const struct gnttab_ops *gnttab_interface;

static int grant_table_version;
static int grefs_per_grant_frame;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
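
/*
 * Grab 'count' references off the free list, expanding the grant table if
 * there are not enough free entries. Returns the first reference of the
 * allocated chain, or a negative errno on failure.
 */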
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}
static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}
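
/* Return a single reference to the free list and run any pending callbacks. */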
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Following applies to gnttab_update_entry_v1.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
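
/*
 * Illustrative sketch of how a frontend driver typically uses the calls
 * above: grant the backend read-write access to one shared page, publish
 * the returned reference (e.g. via xenstore), and revoke it when done.
 * Names such as backend_domid and shared_page are placeholders, and the
 * gfn is obtained from e.g. virt_to_gfn().
 *
 *	int ref = gnttab_grant_foreign_access(backend_domid,
 *					      virt_to_gfn(shared_page), 0);
 *	if (ref < 0)
 *		return ref;
 *	... hand ref to the backend ...
 *	gnttab_end_foreign_access(ref, 0, 0UL);
 *
 * Passing 0 as the last argument keeps the page owned by the caller; a
 * non-zero page address would be freed once the grant is released.
 */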
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
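
/*
 * Grants that are still in use by the remote domain when the caller tries to
 * end foreign access cannot be released immediately. They are queued on
 * deferred_list and retried from a timer until the peer finally drops its
 * mapping, at which point the reference (and optionally the page) is freed.
 */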
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);

static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
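
/*
 * End access to a grant and, if @page is non-zero, free the backing page.
 * If the remote domain still has the grant mapped, the release (and the
 * freeing of the page) is deferred to gnttab_handle_deferred() instead.
 */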
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
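
/*
 * Extend the bookkeeping for 'more_frames' additional grant table frames:
 * allocate new pages of the free-list array as needed and chain the new
 * references onto the head of the free list.
 */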
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
/**
 * gnttab_alloc_pages - allocate pages suitable for mapping grants into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
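
/*
 * Walk a byte range of a (possibly larger than Xen-page-sized) page and
 * invoke @fn once per Xen page it touches, passing the gfn, the offset
 * within that Xen page, and the length covered.
 */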
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
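
/*
 * Asynchronous unmapping: pages that still have extra references cannot be
 * unmapped safely, so the request is re-queued as delayed work (with a delay
 * that grows with the item's age) until all extra references are gone, at
 * which point gnttab_unmap_refs() is called and the item's done() callback
 * reports the result.
 */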
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
static const struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static void gnttab_request_version(void)
{
	/* Only version 1 is used, which will always be available. */
	grant_table_version = 1;
	grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(struct grant_entry_v1);
	gnttab_interface = &gnttab_v1_ops;

	pr_info("Grant tables using version %d layout\n", grant_table_version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}
int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}
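
/*
 * Grow the grant table by enough frames to provide at least 'req_entries'
 * additional grant references, mapping the new frames and extending the
 * free list accordingly.
 */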
static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(grefs_per_grant_frame == 0);
	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame-1)) /
		 grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}
int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(grefs_per_grant_frame == 0);
	max_nr_glist_frames = (max_nr_grant_frames *
			       grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames);
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);