// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

#define RST_POLL_INVL		10 /* us */
#define RST_POLL_TIMEOUT	1000 /* us */

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable the port by clearing the port soft reset bit, which is set by
 * default. The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable() should only be used after __afu_port_disable().
 *
 * The caller needs to hold the lock for protection.
 */
int __afu_port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW clears the ack bit to indicate that the port is fully out
	 * of reset.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       !(v & PORT_CTRL_SFTRST_ACK),
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, failure to enable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * __afu_port_disable - disable a port by holding reset
 * @pdev: port platform device.
 *
 * Disable the port by setting the port soft reset bit; this puts the port
 * into reset.
 *
 * The caller needs to hold the lock for protection.
 */
int __afu_port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_soft_reset_ack to determine if the
	 * reset is done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, failure to disable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) via the
 * functions __afu_port_disable() and __afu_port_enable() (set the port soft
 * reset bit and then clear it). Userspace can do a Port reset at any time,
 * e.g. during DMA or Partial Reconfiguration. It should never cause any
 * system level issue, only functional failure (e.g. DMA or PR operation
 * failure), which is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = __afu_port_disable(pdev);
	if (ret)
		return ret;

	return __afu_port_enable(pdev);
}

static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	umode_t mode = attr->mode;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * The userclk sysfs interfaces are only visible if the port
		 * revision is 0, as hardware with revision >0 doesn't
		 * support them.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs		= port_hdr_attrs,
	.is_visible	= port_hdr_attrs_visible,
};

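/*
 * Illustrative only, not part of the upstream file: these attributes appear
 * under the port platform device in sysfs. Assuming a hypothetical port
 * instance "dfl-port.0", userspace could use them like:
 *
 *   $ cat /sys/bus/platform/devices/dfl-port.0/id
 *   0
 *   $ echo 1 > /sys/bus/platform/devices/dfl-port.0/ltr         # enable LTR
 *   $ echo 1 > /sys/bus/platform/devices/dfl-port.0/ap1_event   # clear event
 */
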
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}

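/*
 * Illustrative userspace sketch, not part of the upstream file: resetting a
 * port via DFL_FPGA_PORT_RESET (from <linux/fpga-dfl.h>), assuming a
 * hypothetical device node /dev/dfl-port.0. The ioctl takes no argument, so
 * arg must be 0 or the driver returns -EINVAL:
 *
 *   int fd = open("/dev/dfl-port.0", O_RDWR);
 *
 *   if (fd >= 0 && ioctl(fd, DFL_FPGA_PORT_RESET, NULL))
 *           perror("port reset");
 */
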
static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs		= port_afu_attrs,
	.is_visible	= port_afu_attrs_visible,
};

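/*
 * Illustrative only, not part of the upstream file: afu_id exposes the
 * 128-bit AFU GUID (guidh followed by guidl) and reads as 32 hex digits,
 * assuming the same hypothetical instance "dfl-port.0":
 *
 *   $ cat /sys/bus/platform/devices/dfl-port.0/afu_id
 *   <32 hex digits>
 *
 * The read fails with -EBUSY while the port is held in reset.
 */
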
static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static long
port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_PORT_UINT_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

static const struct dfl_feature_id port_uint_id_table[] = {
	{.id = PORT_FEATURE_ID_UINT,},
	{0,}
};

static const struct dfl_feature_ops port_uint_ops = {
	.ioctl = port_uint_ioctl,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	}, {
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	}, {
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	}, {
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	}, {
		.id_table = port_uint_id_table,
		.ops = &port_uint_ops,
	}, {
		.ops = NULL,
	}
};

static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata)) {
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
		__port_reset(pdev);
		afu_dma_region_destroy(pdata);
	}
	mutex_unlock(&pdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

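/*
 * Illustrative userspace sketch, not part of the upstream file: querying
 * port info via DFL_FPGA_PORT_GET_INFO from <linux/fpga-dfl.h>, assuming a
 * hypothetical fd opened on /dev/dfl-port.0. argsz must cover the struct up
 * to num_umsgs or the driver returns -EINVAL:
 *
 *   struct dfl_fpga_port_info info = { .argsz = sizeof(info) };
 *
 *   if (ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info) == 0)
 *           printf("regions: %u, umsgs: %u\n",
 *                  info.num_regions, info.num_umsgs);
 */
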
static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}

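/*
 * Illustrative userspace sketch, not part of the upstream file: mapping a
 * user buffer for AFU DMA and unmapping it again, assuming a hypothetical fd
 * on /dev/dfl-port.0 and a page-aligned buffer "buf" of page-multiple size
 * "buf_len". On success the driver fills in map.iova, the device-visible
 * address to program into the AFU:
 *
 *   struct dfl_fpga_port_dma_map map = {
 *           .argsz     = sizeof(map),
 *           .user_addr = (__u64)(uintptr_t)buf,
 *           .length    = buf_len,
 *   };
 *   struct dfl_fpga_port_dma_unmap unmap = { .argsz = sizeof(unmap) };
 *
 *   if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0) {
 *           // ... program map.iova into the AFU and run the transfer ...
 *           unmap.iova = map.iova;
 *           ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *   }
 */
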
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd.
		 * A sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub-feature, and returns 0 or another
		 * error code if the cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

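/*
 * Illustrative userspace sketch, not part of the upstream file: looking up
 * the AFU MMIO region via DFL_FPGA_PORT_GET_REGION_INFO and mapping it with
 * the returned offset, assuming a hypothetical fd on /dev/dfl-port.0:
 *
 *   struct dfl_fpga_port_region_info rinfo = {
 *           .argsz = sizeof(rinfo),
 *           .index = DFL_PORT_REGION_INDEX_AFU,
 *   };
 *
 *   if (ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo) == 0 &&
 *       (rinfo.flags & DFL_PORT_REGION_MMAP)) {
 *           void *mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
 *                             MAP_SHARED, fd, rinfo.offset);
 *           // ... AFU MMIO registers are now accessible through mmio ...
 *   }
 */
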
static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	if (enable)
		ret = __afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static void afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver	= {
		.name	    = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe	    = afu_probe,
	.remove_new = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");