/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
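
/*
 * IOCTL_PRIVCMD_HYPERCALL: pass a hypercall through from a privileged
 * userspace process.  The operation number and the five argument slots
 * are copied in and handed to the hypervisor as-is; the hypervisor
 * itself is responsible for validating them.
 */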
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}
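
/*
 * Userspace sketch (assumptions: the privcmd node is exposed as
 * /proc/xen/privcmd via xenfs on kernels of this vintage; fd and error
 * handling omitted):
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	int fd   = open("/proc/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */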
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE - size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
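
/*
 * Note that gather_array() never lets an element straddle a page
 * boundary: once fewer than "size" bytes remain in the current page it
 * starts a fresh one, so traverse_pages() below can hand out
 * "pagedata + pageidx" pointers without any reassembly.
 */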
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE - size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;

		pageidx += size;
	}

	return ret;
}
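
/*
 * A non-zero return from "fn" aborts the walk and is propagated to the
 * caller.  mmap_mfn_range() below relies on this to stop on the first
 * failed remap, while mmap_batch_fn() instead tags failures in place
 * and keeps going.
 */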
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
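
/*
 * Called by traverse_pages() once per privcmd_mmap_entry: remap
 * msg->npages machine frames, starting at msg->mfn, into the caller's
 * VMA at msg->va, on behalf of domain st->domain.
 */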
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
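
/*
 * IOCTL_PRIVCMD_MMAPBATCH, handled below, is the finer-grained variant
 * of the above: instead of (va, mfn, npages) ranges it takes one
 * machine frame per page of the target region, and it reports failures
 * per frame rather than aborting the whole operation.
 */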
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}
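
/*
 * A frame that fails to map is reported in place by setting the top
 * nibble of its array entry (0xf0000000), the same convention the
 * classic Xen privcmd interface used; mmap_return_errors() then copies
 * the array back so userspace can see which frames failed.
 */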
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}
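
/*
 * Userspace sketch (hypothetical values; "frames" holds nr machine
 * frame numbers for domain domid, and "addr" is an nr-page region
 * previously mmap()ed from the privcmd fd so the singleshot check
 * above passes):
 *
 *	struct privcmd_mmapbatch batch = {
 *		.num  = nr,
 *		.dom  = domid,
 *		.addr = (unsigned long)addr,
 *		.arr  = frames,
 *	};
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch) == 0) {
 *		// entries in frames[] with 0xf0000000 set failed to map
 *	}
 */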
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
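
/*
 * All privcmd mappings are populated up front by the ioctls above, so a
 * fault on one of these VMAs means userspace touched an address that
 * was never successfully mapped; answer with SIGBUS instead of trying
 * to fill the page lazily.
 */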
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translate guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
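
/*
 * vm_private_data doubles as a "mapped once already" flag: the first
 * mapper atomically swaps in a non-NULL token and wins; any later
 * attempt to map over the same VMA sees the token and is refused, which
 * is what keeps the MMAP/MMAPBATCH ioctls single-shot per VMA.
 */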
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif
const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
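
/*
 * privcmd_file_ops is consumed by the xenfs code, which (on kernels of
 * this vintage) exposes it as the "privcmd" node of the filesystem
 * conventionally mounted at /proc/xen.
 */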