virt/kvm/iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn,
			    unsigned long npages);
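/*
 * Pin every 4k page backing [gfn, gfn + size/PAGE_SIZE) in the memslot
 * and return the pfn of the first page. The extra references taken here
 * are dropped one page at a time by kvm_unpin_pages() when the range is
 * unmapped again.
 */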
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			   unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_noslot_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}
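/*
 * Map a memslot's guest physical range into the VM's IOMMU domain, using
 * the largest host page size that still fits inside the memslot and is
 * naturally aligned at the current gfn. Returns 0 on success; on failure,
 * everything mapped and pinned so far is torn down again.
 */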
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%llx\n", pfn);
			/*
			 * The failed chunk was pinned but never mapped, so
			 * kvm_iommu_put_pages() below cannot find it; unpin
			 * it here to avoid leaking the page references.
			 */
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	/* The third argument is a page count, not an end gfn. */
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
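/* Map every memslot under SRCU, stopping at the first failure. */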
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}
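/*
 * Attach an assigned PCI device to the VM's IOMMU domain. If attaching
 * the device changes the domain's cache-coherency capability, the guest
 * memory map is rebuilt so the IOMMU_CACHE mapping flag stays in sync.
 */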
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

	/* Check whether the IOMMU page table for guest memory needs updating */
	if ((last_flags ^ kvm->arch.iommu_flags) ==
			KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
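/* Detach an assigned PCI device from the VM's IOMMU domain. */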
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}
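/*
 * Allocate the VM's IOMMU domain and map all existing memslots into it.
 * Unless allow_unsafe_assigned_interrupts is set, refuse (-EPERM) on
 * hardware without interrupt remapping, where an assigned device could
 * be used to trigger arbitrary host interrupts.
 */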
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
				  IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
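/* Drop the reference that kvm_pin_pages() took on each of npages pages. */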
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}
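/*
 * Walk [base_gfn, base_gfn + npages), unmapping each mapped region from
 * the IOMMU domain in whatever granularity iommu_unmap() reports back,
 * and unpin exactly the pages each unmapped region covered.
 */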
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

		if (!phys) {
			gfn++;
			continue;
		}

		pfn = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	return 0;
}
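/*
 * Tear down the VM's IOMMU mappings: unmap and unpin every memslot under
 * slots_lock, clear the domain pointer, then free the domain itself.
 */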
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}