/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
32 #define DCA_VERSION "1.12.1"
34 MODULE_VERSION(DCA_VERSION
);
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Intel Corporation");
38 static DEFINE_SPINLOCK(dca_lock
);
40 static LIST_HEAD(dca_domains
);
42 static BLOCKING_NOTIFIER_HEAD(dca_provider_chain
);
44 static int dca_providers_blocked
;
46 static struct pci_bus
*dca_pci_rc_from_dev(struct device
*dev
)
48 struct pci_dev
*pdev
= to_pci_dev(dev
);
49 struct pci_bus
*bus
= pdev
->bus
;
57 static struct dca_domain
*dca_allocate_domain(struct pci_bus
*rc
)
59 struct dca_domain
*domain
;
61 domain
= kzalloc(sizeof(*domain
), GFP_NOWAIT
);
65 INIT_LIST_HEAD(&domain
->dca_providers
);
71 static void dca_free_domain(struct dca_domain
*domain
)
73 list_del(&domain
->node
);
77 static int dca_provider_ioat_ver_3_0(struct device
*dev
)
79 struct pci_dev
*pdev
= to_pci_dev(dev
);
81 return ((pdev
->vendor
== PCI_VENDOR_ID_INTEL
) &&
82 ((pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG0
) ||
83 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG1
) ||
84 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG2
) ||
85 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG3
) ||
86 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG4
) ||
87 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG5
) ||
88 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG6
) ||
89 (pdev
->device
== PCI_DEVICE_ID_INTEL_IOAT_TBG7
)));
92 static void unregister_dca_providers(void)
94 struct dca_provider
*dca
, *_dca
;
95 struct list_head unregistered_providers
;
96 struct dca_domain
*domain
;
99 blocking_notifier_call_chain(&dca_provider_chain
,
100 DCA_PROVIDER_REMOVE
, NULL
);
102 INIT_LIST_HEAD(&unregistered_providers
);
104 spin_lock_irqsave(&dca_lock
, flags
);
106 if (list_empty(&dca_domains
)) {
107 spin_unlock_irqrestore(&dca_lock
, flags
);
111 /* at this point only one domain in the list is expected */
112 domain
= list_first_entry(&dca_domains
, struct dca_domain
, node
);
116 list_for_each_entry_safe(dca
, _dca
, &domain
->dca_providers
, node
) {
117 list_del(&dca
->node
);
118 list_add(&dca
->node
, &unregistered_providers
);
121 dca_free_domain(domain
);
123 spin_unlock_irqrestore(&dca_lock
, flags
);
125 list_for_each_entry_safe(dca
, _dca
, &unregistered_providers
, node
) {
126 dca_sysfs_remove_provider(dca
);
127 list_del(&dca
->node
);
131 static struct dca_domain
*dca_find_domain(struct pci_bus
*rc
)
133 struct dca_domain
*domain
;
135 list_for_each_entry(domain
, &dca_domains
, node
)
136 if (domain
->pci_rc
== rc
)
142 static struct dca_domain
*dca_get_domain(struct device
*dev
)
145 struct dca_domain
*domain
;
147 rc
= dca_pci_rc_from_dev(dev
);
148 domain
= dca_find_domain(rc
);
151 if (dca_provider_ioat_ver_3_0(dev
) && !list_empty(&dca_domains
)) {
152 dca_providers_blocked
= 1;
154 domain
= dca_allocate_domain(rc
);
156 list_add(&domain
->node
, &dca_domains
);
163 static struct dca_provider
*dca_find_provider_by_dev(struct device
*dev
)
165 struct dca_provider
*dca
;
167 struct dca_domain
*domain
;
170 rc
= dca_pci_rc_from_dev(dev
);
171 domain
= dca_find_domain(rc
);
175 if (!list_empty(&dca_domains
))
176 domain
= list_first_entry(&dca_domains
,
183 list_for_each_entry(dca
, &domain
->dca_providers
, node
)
184 if ((!dev
) || (dca
->ops
->dev_managed(dca
, dev
)))
191 * dca_add_requester - add a dca client to the list
192 * @dev - the device that wants dca service
194 int dca_add_requester(struct device
*dev
)
196 struct dca_provider
*dca
;
197 int err
, slot
= -ENODEV
;
199 struct pci_bus
*pci_rc
;
200 struct dca_domain
*domain
;
205 spin_lock_irqsave(&dca_lock
, flags
);
207 /* check if the requester has not been added already */
208 dca
= dca_find_provider_by_dev(dev
);
210 spin_unlock_irqrestore(&dca_lock
, flags
);
214 pci_rc
= dca_pci_rc_from_dev(dev
);
215 domain
= dca_find_domain(pci_rc
);
217 spin_unlock_irqrestore(&dca_lock
, flags
);
221 list_for_each_entry(dca
, &domain
->dca_providers
, node
) {
222 slot
= dca
->ops
->add_requester(dca
, dev
);
227 spin_unlock_irqrestore(&dca_lock
, flags
);
232 err
= dca_sysfs_add_req(dca
, dev
, slot
);
234 spin_lock_irqsave(&dca_lock
, flags
);
235 if (dca
== dca_find_provider_by_dev(dev
))
236 dca
->ops
->remove_requester(dca
, dev
);
237 spin_unlock_irqrestore(&dca_lock
, flags
);
243 EXPORT_SYMBOL_GPL(dca_add_requester
);
246 * dca_remove_requester - remove a dca client from the list
247 * @dev - the device that wants dca service
249 int dca_remove_requester(struct device
*dev
)
251 struct dca_provider
*dca
;
258 spin_lock_irqsave(&dca_lock
, flags
);
259 dca
= dca_find_provider_by_dev(dev
);
261 spin_unlock_irqrestore(&dca_lock
, flags
);
264 slot
= dca
->ops
->remove_requester(dca
, dev
);
265 spin_unlock_irqrestore(&dca_lock
, flags
);
270 dca_sysfs_remove_req(dca
, slot
);
274 EXPORT_SYMBOL_GPL(dca_remove_requester
);
277 * dca_common_get_tag - return the dca tag (serves both new and old api)
278 * @dev - the device that wants dca service
279 * @cpu - the cpuid as returned by get_cpu()
281 u8
dca_common_get_tag(struct device
*dev
, int cpu
)
283 struct dca_provider
*dca
;
287 spin_lock_irqsave(&dca_lock
, flags
);
289 dca
= dca_find_provider_by_dev(dev
);
291 spin_unlock_irqrestore(&dca_lock
, flags
);
294 tag
= dca
->ops
->get_tag(dca
, dev
, cpu
);
296 spin_unlock_irqrestore(&dca_lock
, flags
);
301 * dca3_get_tag - return the dca tag to the requester device
302 * for the given cpu (new api)
303 * @dev - the device that wants dca service
304 * @cpu - the cpuid as returned by get_cpu()
306 u8
dca3_get_tag(struct device
*dev
, int cpu
)
311 return dca_common_get_tag(dev
, cpu
);
313 EXPORT_SYMBOL_GPL(dca3_get_tag
);
316 * dca_get_tag - return the dca tag for the given cpu (old api)
317 * @cpu - the cpuid as returned by get_cpu()
319 u8
dca_get_tag(int cpu
)
321 struct device
*dev
= NULL
;
323 return dca_common_get_tag(dev
, cpu
);
325 EXPORT_SYMBOL_GPL(dca_get_tag
);
328 * alloc_dca_provider - get data struct for describing a dca provider
329 * @ops - pointer to struct of dca operation function pointers
330 * @priv_size - size of extra mem to be added for provider's needs
332 struct dca_provider
*alloc_dca_provider(struct dca_ops
*ops
, int priv_size
)
334 struct dca_provider
*dca
;
337 alloc_size
= (sizeof(*dca
) + priv_size
);
338 dca
= kzalloc(alloc_size
, GFP_KERNEL
);
345 EXPORT_SYMBOL_GPL(alloc_dca_provider
);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
359 * register_dca_provider - register a dca provider
360 * @dca - struct created by alloc_dca_provider()
361 * @dev - device providing dca services
363 int register_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
367 struct dca_domain
*domain
;
369 spin_lock_irqsave(&dca_lock
, flags
);
370 if (dca_providers_blocked
) {
371 spin_unlock_irqrestore(&dca_lock
, flags
);
374 spin_unlock_irqrestore(&dca_lock
, flags
);
376 err
= dca_sysfs_add_provider(dca
, dev
);
380 spin_lock_irqsave(&dca_lock
, flags
);
381 domain
= dca_get_domain(dev
);
383 if (dca_providers_blocked
) {
384 spin_unlock_irqrestore(&dca_lock
, flags
);
385 dca_sysfs_remove_provider(dca
);
386 unregister_dca_providers();
388 spin_unlock_irqrestore(&dca_lock
, flags
);
392 list_add(&dca
->node
, &domain
->dca_providers
);
393 spin_unlock_irqrestore(&dca_lock
, flags
);
395 blocking_notifier_call_chain(&dca_provider_chain
,
396 DCA_PROVIDER_ADD
, NULL
);
399 EXPORT_SYMBOL_GPL(register_dca_provider
);
402 * unregister_dca_provider - remove a dca provider
403 * @dca - struct created by alloc_dca_provider()
405 void unregister_dca_provider(struct dca_provider
*dca
, struct device
*dev
)
408 struct pci_bus
*pci_rc
;
409 struct dca_domain
*domain
;
411 blocking_notifier_call_chain(&dca_provider_chain
,
412 DCA_PROVIDER_REMOVE
, NULL
);
414 spin_lock_irqsave(&dca_lock
, flags
);
416 list_del(&dca
->node
);
418 pci_rc
= dca_pci_rc_from_dev(dev
);
419 domain
= dca_find_domain(pci_rc
);
420 if (list_empty(&domain
->dca_providers
))
421 dca_free_domain(domain
);
423 spin_unlock_irqrestore(&dca_lock
, flags
);
425 dca_sysfs_remove_provider(dca
);
427 EXPORT_SYMBOL_GPL(unregister_dca_provider
);
430 * dca_register_notify - register a client's notifier callback
432 void dca_register_notify(struct notifier_block
*nb
)
434 blocking_notifier_chain_register(&dca_provider_chain
, nb
);
436 EXPORT_SYMBOL_GPL(dca_register_notify
);
439 * dca_unregister_notify - remove a client's notifier callback
441 void dca_unregister_notify(struct notifier_block
*nb
)
443 blocking_notifier_chain_unregister(&dca_provider_chain
, nb
);
445 EXPORT_SYMBOL_GPL(dca_unregister_notify
);
447 static int __init
dca_init(void)
449 pr_info("dca service started, version %s\n", DCA_VERSION
);
450 return dca_sysfs_init();
453 static void __exit
dca_exit(void)
458 arch_initcall(dca_init
);
459 module_exit(dca_exit
);