drivers/iommu/iommu.c (blob 2198b2dbbcd3ad964b03a13dd6fd8dd336f27bed)
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
static ssize_t show_iommu_group(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid))
                return 0;

        return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
static int add_iommu_group(struct device *dev, void *data)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                return device_create_file(dev, &dev_attr_iommu_group);

        return 0;
}
static int remove_iommu_group(struct device *dev)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                device_remove_file(dev, &dev_attr_iommu_group);

        return 0;
}
static int iommu_device_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE)
                return add_iommu_group(dev, NULL);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                return remove_iommu_group(dev);

        return 0;
}
static struct notifier_block iommu_device_nb = {
        .notifier_call = iommu_device_notifier,
};
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_device_nb);
        bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
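/*
 * Illustrative sketch, not part of this file: an IOMMU driver would
 * typically register its callbacks once its hardware has been probed.
 * The ops structure, the callback names and the choice of pci_bus_type
 * below are hypothetical; pgsize_bitmap advertises the page sizes the
 * hardware is able to map.
 *
 *      static struct iommu_ops example_iommu_ops = {
 *              .domain_init    = example_domain_init,
 *              .domain_destroy = example_domain_destroy,
 *              .attach_dev     = example_attach_dev,
 *              .detach_dev     = example_detach_dev,
 *              .map            = example_map,
 *              .unmap          = example_unmap,
 *              .iova_to_phys   = example_iova_to_phys,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M,
 *      };
 *
 *      static int __init example_iommu_init(void)
 *      {
 *              return bus_set_iommu(&pci_bus_type, &example_iommu_ops);
 *      }
 */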
bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler)
{
        BUG_ON(!domain);

        domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
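/*
 * Illustrative sketch, not part of this file: a hypothetical IOMMU user
 * installing a fault handler on its domain.  The handler follows the
 * iommu_fault_handler_t signature (domain, device, faulting iova, flags);
 * the function name and the 'domain' variable are made up for the example.
 *
 *      static int example_fault_handler(struct iommu_domain *domain,
 *                                       struct device *dev,
 *                                       unsigned long iova, int flags)
 *      {
 *              dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n",
 *                      iova, flags);
 *              return 0;
 *      }
 *
 *      iommu_set_fault_handler(domain, example_fault_handler);
 */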
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                               unsigned long iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
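/*
 * Illustrative sketch, not part of this file: a user such as a
 * virtualization layer asking whether the hardware keeps its mappings
 * cache coherent before deciding how aggressively to flush.  The
 * 'domain' and 'coherent' variables are assumptions made for the example.
 *
 *      bool coherent = iommu_domain_has_cap(domain,
 *                                           IOMMU_CAP_CACHE_COHERENCY);
 */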
int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
                        "0x%x\n", iova, (unsigned long)paddr,
                        (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
                                (unsigned long)paddr, (unsigned long)size);

        while (size) {
                unsigned long pgsize, addr_merge = iova | paddr;
                unsigned int pgsize_idx;

                /* Max page size that still fits into 'size' */
                pgsize_idx = __fls(size);

                /* need to consider alignment requirements ? */
                if (likely(addr_merge)) {
                        /* Max page size allowed by both iova and paddr */
                        unsigned int align_pgsize_idx = __ffs(addr_merge);

                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
                }

                /* build a mask of acceptable page sizes */
                pgsize = (1UL << (pgsize_idx + 1)) - 1;

                /* throw away page sizes not supported by the hardware */
                pgsize &= domain->ops->pgsize_bitmap;

                /* make sure we're still sane */
                BUG_ON(!pgsize);

                /* pick the biggest page */
                pgsize_idx = __fls(pgsize);
                pgsize = 1UL << pgsize_idx;
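                /*
                 * Worked example, not from the original file: with a
                 * hypothetical pgsize_bitmap of SZ_4K | SZ_2M | SZ_1G
                 * (0x40201000), iova = 0x12345000, paddr = 0x23456000 and
                 * size = 0x10000, __fls(size) is 16 but __ffs(iova | paddr)
                 * is 12, so pgsize_idx becomes 12.  The mask
                 * (1UL << 13) - 1 = 0x1fff keeps only the 4K bit of the
                 * bitmap, and a 4K page is mapped on this iteration even
                 * though 64K remain.
                 */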
                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                                        (unsigned long)paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
                        iova, (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
                                                        (unsigned long)size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t left = size - unmapped;

                unmapped_page = domain->ops->unmap(domain, iova, left);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
                                        (unsigned long)unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
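/*
 * Illustrative sketch, not part of this file: the typical life cycle of a
 * domain as seen by an IOMMU API consumer.  The bus type, the addresses and
 * the 'dev' and 'page' variables are assumptions made for the example, and
 * error handling is omitted.
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *      iommu_attach_device(domain, dev);
 *      iommu_map(domain, 0x100000, page_to_phys(page), SZ_4K,
 *                IOMMU_READ | IOMMU_WRITE);
 *
 *      (the device performs DMA to iova 0x100000 here)
 *
 *      iommu_unmap(domain, 0x100000, SZ_4K);
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */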
int iommu_device_group(struct device *dev, unsigned int *groupid)
{
        if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
                return dev->bus->iommu_ops->device_group(dev, groupid);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);