drivers/misc/mei/pci-me.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

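/*
 * MODULE_DEVICE_TABLE() exports the PCI ID table above as module alias
 * information, so userspace (udev/modprobe) can autoload this driver when a
 * matching PCI device is discovered.
 */
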
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

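/*
 * Without CONFIG_PM the PM-domain helpers are empty inline stubs, so the
 * probe and remove paths below can call them unconditionally.
 */
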
/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

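	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit. When the 64-bit
	 * masks succeed, err still holds 0 from pci_request_regions(), so
	 * the error check below fires only if every mask attempt failed.
	 */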
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

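	/*
	 * With MSI the interrupt is exclusive to this device, so a NULL
	 * primary handler is used and IRQF_ONESHOT is required for the
	 * threaded handler. On a shared legacy line the quick handler
	 * filters the interrupt and IRQF_SHARED is used instead.
	 */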
	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
				NULL,
				mei_me_irq_thread_handler,
				IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
				mei_me_irq_quick_handler,
				mei_me_irq_thread_handler,
				IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For hardware that is not wake-capable, the runtime PM framework
	 * cannot be used at the PCI device level. Use domain runtime PM
	 * callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

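	/*
	 * Balance the pm_runtime_put_noidle() taken in mei_me_probe() when
	 * power gating is enabled.
	 */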
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

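/*
 * Suspend freed the interrupt and disabled MSI, so resume must re-enable MSI
 * and re-request the IRQ before restarting the device.
 */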
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
				NULL,
				mei_me_irq_thread_handler,
				IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
				mei_me_irq_quick_handler,
				mei_me_irq_thread_handler,
				IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
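/*
 * Runtime PM idle callback: kick autosuspend only when the write queues are
 * idle. The non-zero return keeps the PM core from starting another suspend
 * attempt of its own; autosuspend honours the delay configured in probe.
 */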
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_set_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_unset_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

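/*
 * Setting dev.pm_domain makes the PM core use these callbacks in preference
 * to the PCI bus runtime PM callbacks; the bus dev_pm_ops were copied above,
 * so system sleep handling is left unchanged.
 */
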
/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->dev->pm_domain = NULL;
}

static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");