// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define CDSP1_DOMAIN_ID (4)
#define FASTRPC_DEV_MAX		5 /* adsp, mdsp, slpi, cdsp, cdsp1 */
#define FASTRPC_MAX_SESSIONS	14
#define FASTRPC_MAX_VMIDS	16
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_DSP_UTILITIES_HANDLE	2
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define INIT_FILE_NAMELEN_MAX (128)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Add memory to static PD pool, protection thru XPU */
#define ADSP_MMAP_HEAP_ADDR  4
/* MAP static DMA buffer on DSP User PD */
#define ADSP_MMAP_DMA_BUFFER  6
/* Add memory to static PD pool protection thru hypervisor */
#define ADSP_MMAP_REMOTE_HEAP_ADDR  8
/* Add memory to userPD pool, for user heap */
#define ADSP_MMAP_ADD_PAGES 0x1000
/* Add memory to userPD pool, for LLC heap */
#define ADSP_MMAP_ADD_PAGES_LLC 0x3000

#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
				(((attr & 0x07) << 29) |		\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) <<  8) |		\
				((oin & 0x0f) <<  4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
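
/*
 * Worked example (added for illustration, not part of the original source):
 * FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1) packs method 4 with two
 * input buffers and one output buffer:
 *   (4 << 24) | (2 << 16) | (1 << 8) == 0x04020100
 * so REMOTE_SCALARS_INBUFS(0x04020100) == 2,
 *    REMOTE_SCALARS_OUTBUFS(0x04020100) == 1 and
 *    REMOTE_SCALARS_LENGTH(0x04020100) == 3.
 */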

#define FASTRPC_CREATE_PROCESS_NARGS	6
#define FASTRPC_CREATE_STATIC_PROCESS_NARGS	3
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8
#define FASTRPC_RMID_INIT_MEM_MAP	10
#define FASTRPC_RMID_INIT_MEM_UNMAP	11

/* Protection Domain(PD) ids */
#define ROOT_PD		(0)
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp", "cdsp1" };

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
	s32 fd;		/* dma handle fd */
	u32 offset;	/* dma handle offset */
	u32 len;	/* dma handle length */
};

struct fastrpc_remote_buf {
	u64 pv;		/* buffer pointer */
	u64 len;	/* length of buffer */
};

union fastrpc_remote_arg {
	struct fastrpc_remote_buf buf;
	struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_mem_map_req_msg {
	s32 pgid;
	s32 fd;
	s32 offset;
	u32 flags;
	u64 vaddrin;
	s32 num;
	s32 data_len;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_mem_unmap_req_msg {
	s32 pgid;
	s32 fd;
	u64 vaddrin;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;	/* handle to invoke */
	u32 sc;		/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	struct fastrpc_device *secure_fdevice;
	struct fastrpc_device *fdevice;
	struct fastrpc_buf *remote_heap;
	struct list_head invoke_interrupted_mmaps;
	bool secure;
	bool unsigned_support;
	u64 dma_mask;
};

struct fastrpc_device {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	bool is_secure_dev;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
			struct qcom_scm_vmperm perm;
			int vmid = map->fl->cctx->vmperms[0].vmid;
			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
			int err = 0;

			perm.vmid = QCOM_SCM_VMID_HLOS;
			perm.perm = QCOM_SCM_PERM_RWX;
			err = qcom_scm_assign_mem(map->phys, map->size,
						  &src_perms, &perm, 1);
			if (err) {
				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
					map->phys, map->size, err);
				return;
			}
		}
		dma_buf_unmap_attachment_unlocked(map->attach, map->table,
						  DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	if (map->fl) {
		spin_lock(&map->fl->lock);
		list_del(&map->node);
		spin_unlock(&map->fl->lock);
		map->fl = NULL;
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static int fastrpc_map_get(struct fastrpc_map *map)
{
	if (!map)
		return -ENOENT;

	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}

static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
			      struct fastrpc_map **ppmap, bool take_ref)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int ret = -ENOENT;

	spin_lock(&fl->lock);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd != fd)
			continue;

		if (take_ref) {
			ret = fastrpc_map_get(map);
			if (ret) {
				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
					__func__, fd, ret);
				break;
			}
		}

		*ppmap = map;
		ret = 0;
		break;
	}
	spin_unlock(&fl->lock);

	return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	mutex_destroy(&buf->lock);
	kfree(buf);
}

static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			       u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	*obuf = buf;

	return 0;
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	int ret;
	struct fastrpc_buf *buf;

	ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
	if (ret)
		return ret;

	buf = *obuf;

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	return 0;
}

static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
				     u64 size, struct fastrpc_buf **obuf)
{
	struct device *rdev = &fl->cctx->rpdev->dev;

	return __fastrpc_buf_alloc(fl, rdev, size, obuf);
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nbufs; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
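
/*
 * Worked example (added for illustration, not part of the original source):
 * for two user buffers [0x1000, 0x1800) and [0x1400, 0x2000), the sort
 * above keeps the first intact (mstart 0x1000, mend 0x1800, offset 0)
 * and folds the second into its non-overlapping tail: mstart 0x1800,
 * mend 0x2000, offset 0x400. Only mend - mstart bytes of each buffer
 * consume new payload space in fastrpc_get_payload_size().
 */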

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}
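
/*
 * Note (added for clarity): the IDR value is shifted left by four bits
 * before being stored in ctx->ctxid, which leaves the low nibble of the
 * message context free for the PD id (see fastrpc_invoke_send(), where
 * msg->ctx = ctx->ctxid | fl->pd). FASTRPC_CTXID_MASK (0xFF0) and the
 * ">> 4" in fastrpc_rpmsg_callback() undo exactly this packing.
 */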

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	iosys_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	dma_resv_assert_held(dmabuf->resv);

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detatch,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, u32 attr, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	struct sg_table *table;
	int err = 0;

	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	kref_init(&map->refcount);

	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		err = PTR_ERR(table);
		goto map_err;
	}
	map->table = table;

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		map->phys = sg_phys(map->table->sgl);
	} else {
		map->phys = sg_dma_address(map->table->sgl);
		map->phys += ((u64)fl->sctx->sid << 32);
	}
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		/*
		 * If subsystem VMIDs are defined in DTSI, then do
		 * hyp_assign from HLOS to those VM(s)
		 */
		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
		struct qcom_scm_vmperm dst_perms[2] = {0};

		dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
		dst_perms[0].perm = QCOM_SCM_PERM_RW;
		dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
		dst_perms[1].perm = QCOM_SCM_PERM_RWX;
		map->attr = attr;
		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
		if (err) {
			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
				map->phys, map->size, err);
			goto map_err;
		}
	}
	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	fastrpc_map_put(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_buf) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
			 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}

	}
	return 0;
}

static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
	return (struct fastrpc_invoke_buf *)(&pra[len]);
}

static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
	return (struct fastrpc_phy_page *)(&buf[len]);
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	if (ctx->fl->sctx->sid)
		err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	else
		err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	memset(ctx->buf->virt, 0, pkt_size);
	rpra = ctx->buf->virt;
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		if (ctx->maps[i]) {
			pages[i].addr = ctx->maps[i]->phys;
			pages[i].size = ctx->maps[i]->size;
		}
		rpra[i].dma.fd = ctx->args[i].fd;
		rpra[i].dma.len = ctx->args[i].length;
		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	union fastrpc_remote_arg *rpra = ctx->rpra;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_map *mmap = NULL;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	u64 *fdlist;
	int i, inbufs, outbufs, handles;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].buf.len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	/* Clean up fdlist which is updated by DSP */
	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
		if (!fdlist[i])
			break;
		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
			fastrpc_map_put(mmap);
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	struct fastrpc_buf *buf, *b;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n",  handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = fastrpc_get_args(kernel, ctx);
	if (err)
		goto bail;

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* make sure that all memory writes by DSP are seen by CPU */
	dma_rmb();
	/* populate all the output buffers with results */
	err = fastrpc_put_args(ctx, kernel);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}

	if (err == -ERESTARTSYS) {
		list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
			list_del(&buf->node);
			list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
		}
	}

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
	/* Check if the device node is non-secure and channel is secure */
	if (!fl->is_secure_dev && fl->cctx->secure) {
		/*
		 * Allow untrusted applications to offload only to Unsigned PD when
		 * channel is configured as secure and block untrusted apps on channel
		 * that does not support unsigned PD offload
		 */
		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
			return true;
		}
	}

	return false;
}

static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
					      char __user *argp)
{
	struct fastrpc_init_create_static init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	char *name;
	int err;
	bool scm_done = false;
	struct {
		int pgid;
		u32 namelen;
		u32 pageslen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.namelen > INIT_FILE_NAMELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	name = memdup_user(u64_to_user_ptr(init.name), init.namelen);
	if (IS_ERR(name)) {
		err = PTR_ERR(name);
		goto err;
	}

	if (!fl->cctx->remote_heap) {
		err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
						&fl->cctx->remote_heap);
		if (err)
			goto err_name;

		/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
		if (fl->cctx->vmcount) {
			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);

			err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
						  (u64)fl->cctx->remote_heap->size,
						  &src_perms,
						  fl->cctx->vmperms, fl->cctx->vmcount);
			if (err) {
				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
					fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
				goto err_map;
			}
			scm_done = true;
		}
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = init.namelen;
	inbuf.pageslen = 0;
	fl->pd = USER_PD;

	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)name;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	pages[0].addr = fl->cctx->remote_heap->phys;
	pages[0].size = fl->cctx->remote_heap->size;

	args[2].ptr = (u64)(uintptr_t) pages;
	args[2].length = sizeof(*pages);
	args[2].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);
	kfree(name);

	return 0;
err_invoke:
	if (fl->cctx->vmcount && scm_done) {
		u64 src_perms = 0;
		struct qcom_scm_vmperm dst_perms;
		u32 i;

		for (i = 0; i < fl->cctx->vmcount; i++)
			src_perms |= BIT(fl->cctx->vmperms[i].vmid);

		dst_perms.vmid = QCOM_SCM_VMID_HLOS;
		dst_perms.perm = QCOM_SCM_PERM_RWX;
		err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
					  (u64)fl->cctx->remote_heap->size,
					  &src_perms, &dst_perms, 1);
		if (err)
			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
				fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
	}
err_map:
	fastrpc_buf_free(fl->cctx->remote_heap);
err_name:
	kfree(name);
err:
	kfree(args);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;
	bool unsigned_module = false;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
		unsigned_module = true;

	if (is_session_rejected(fl, unsigned_module)) {
		err = -ECONNREFUSED;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	fastrpc_map_put(map);
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node)
		fastrpc_map_put(map);

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_device *fdevice;
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fdevice = miscdev_to_fdevice(filp->private_data);
	cctx = fdevice->cctx;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	fl->is_secure_dev = fdevice->secure;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
				     uint32_t dsp_attr_buf_len)
{
	struct fastrpc_invoke_args args[2] = { 0 };

	/*
	 * Capability filled in userspace. This carries the information
	 * about the remoteproc support which is fetched from the remoteproc
	 * sysfs node by userspace.
	 */
	dsp_attr_buf[0] = 0;
	dsp_attr_buf_len -= 1;

	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
	args[0].length = sizeof(dsp_attr_buf_len);
	args[0].fd = -1;
	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
	args[1].length = dsp_attr_buf_len * sizeof(u32);
	args[1].fd = -1;

	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
				       FASTRPC_SCALARS(0, 1, 1), args);
}

static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
					struct fastrpc_user *fl)
{
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	uint32_t attribute_id = cap->attribute_id;
	uint32_t *dsp_attributes;
	unsigned long flags;
	uint32_t domain = cap->domain;
	int err;

	spin_lock_irqsave(&cctx->lock, flags);
	/* check if we already have queried dsp for attributes */
	if (cctx->valid_attributes) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
	if (!dsp_attributes)
		return -ENOMEM;

	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
	if (err == DSP_UNSUPPORTED_API) {
		dev_info(&cctx->rpdev->dev,
			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
		kfree(dsp_attributes);
		return -EOPNOTSUPP;
	} else if (err) {
		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
		kfree(dsp_attributes);
		return err;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	cctx->valid_attributes = true;
	spin_unlock_irqrestore(&cctx->lock, flags);
	kfree(dsp_attributes);
done:
	cap->capability = cctx->dsp_attributes[attribute_id];
	return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_ioctl_capability cap = {0};
	int err = 0;

	if (copy_from_user(&cap, argp, sizeof(cap)))
		return -EFAULT;

	cap.capability = 0;
	if (cap.domain >= FASTRPC_DEV_MAX) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
			cap.domain, err);
		return -ECHRNG;
	}

	/* Fastrpc Capablities does not support modem domain */
	if (cap.domain == MDSP_DOMAIN_ID) {
		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
		return -ECHRNG;
	}

	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
			cap.attribute_id, err);
		return -EOVERFLOW;
	}

	err = fastrpc_get_info_from_kernel(&cap, fl);
	if (err)
		return err;

	if (copy_to_user(argp, &cap, sizeof(cap)))
		return -EFAULT;

	return 0;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_buf *buf = NULL, *iter, *b;
	struct fastrpc_req_munmap req;
	struct device *dev = fl->sctx->dev;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
		if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
			buf = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
			req.vaddrout, req.size);
		return -EINVAL;
	}

	return fastrpc_req_munmap_impl(fl, buf);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);

		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
		err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
	else
		err = fastrpc_buf_alloc(fl, dev, req.size, &buf);

	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		fastrpc_buf_free(buf);
		return err;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	/* Add memory to static PD pool, protection thru hypervisor */
	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);

		err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
					  &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
				buf->phys, buf->size, err);
			goto err_assign;
		}
	}

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		err = -EFAULT;
		goto err_assign;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_assign:
	fastrpc_req_munmap_impl(fl, buf);

	return err;
}

static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_map *map = NULL, *iter, *m;
	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
	int err = 0;
	u32 sc;
	struct device *dev = fl->sctx->dev;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, m, &fl->maps, node) {
		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
			map = iter;
			break;
		}
	}

	spin_unlock(&fl->lock);

	if (!map) {
		dev_err(dev, "map not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.len = map->len;
	req_msg.vaddrin = map->raddr;
	req_msg.fd = map->fd;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
		return err;
	}
	fastrpc_map_put(map);

	return 0;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_mem_unmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_mem_unmap_impl(fl, &req);
}

static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
	struct fastrpc_mem_map_req_msg req_msg = { 0 };
	struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
	struct fastrpc_mem_unmap req_unmap = { 0 };
	struct fastrpc_phy_page pages = { 0 };
	struct fastrpc_mem_map req;
	struct device *dev = fl->sctx->dev;
	struct fastrpc_map *map = NULL;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	/* create SMMU mapping */
	err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
	if (err) {
		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.fd = req.fd;
	req_msg.offset = req.offset;
	req_msg.vaddrin = req.vaddrin;
	map->va = (void *) (uintptr_t) req.vaddrin;
	req_msg.flags = req.flags;
	req_msg.num = sizeof(pages);
	req_msg.data_len = 0;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = map->phys;
	pages.size = map->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &pages;
	args[2].length = 0;

	args[3].ptr = (u64) (uintptr_t) &rsp_msg;
	args[3].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
	if (err) {
		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
			req.fd, req.vaddrin, map->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	map->raddr = rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
		req_unmap.length = map->size;
		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	return 0;

err_invoke:
	fastrpc_map_put(map);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, ROOT_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE_STATIC:
		err = fastrpc_init_create_static_process(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_MAP:
		err = fastrpc_req_mem_map(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_UNMAP:
		err = fastrpc_req_mem_unmap(fl, argp);
		break;
	case FASTRPC_IOCTL_GET_DSP_INFO:
		err = fastrpc_get_dsp_info(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};
*pdev
)
2146 struct fastrpc_channel_ctx
*cctx
;
2147 struct fastrpc_session_ctx
*sess
;
2148 struct device
*dev
= &pdev
->dev
;
2149 int i
, sessions
= 0;
2150 unsigned long flags
;
2153 cctx
= dev_get_drvdata(dev
->parent
);
2157 of_property_read_u32(dev
->of_node
, "qcom,nsessions", &sessions
);
2159 spin_lock_irqsave(&cctx
->lock
, flags
);
2160 if (cctx
->sesscount
>= FASTRPC_MAX_SESSIONS
) {
2161 dev_err(&pdev
->dev
, "too many sessions\n");
2162 spin_unlock_irqrestore(&cctx
->lock
, flags
);
2165 sess
= &cctx
->session
[cctx
->sesscount
++];
2169 dev_set_drvdata(dev
, sess
);
2171 if (of_property_read_u32(dev
->of_node
, "reg", &sess
->sid
))
2172 dev_info(dev
, "FastRPC Session ID not specified in DT\n");
2175 struct fastrpc_session_ctx
*dup_sess
;
2177 for (i
= 1; i
< sessions
; i
++) {
2178 if (cctx
->sesscount
>= FASTRPC_MAX_SESSIONS
)
2180 dup_sess
= &cctx
->session
[cctx
->sesscount
++];
2181 memcpy(dup_sess
, sess
, sizeof(*dup_sess
));
2184 spin_unlock_irqrestore(&cctx
->lock
, flags
);
2185 rc
= dma_set_mask(dev
, DMA_BIT_MASK(32));
2187 dev_err(dev
, "32-bit DMA enable failed\n");

static void fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove_new = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	if (!fdev->miscdev.name)
		return -ENOMEM;

	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	struct device_node *rmem_node;
	struct reserved_mem *rmem;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i < FASTRPC_DEV_MAX; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");

	vmcount = of_property_read_variable_u32_array(rdev->of_node,
				"qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (vmcount) {
		data->vmcount = vmcount;
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	rmem_node = of_parse_phandle(rdev->of_node, "memory-region", 0);
	if (domain_id == SDSP_DOMAIN_ID && rmem_node) {
		u64 src_perms;

		rmem = of_reserved_mem_lookup(rmem_node);
		if (!rmem) {
			err = -EINVAL;
			goto fdev_error;
		}

		src_perms = BIT(QCOM_SCM_VMID_HLOS);

		qcom_scm_assign_mem(rmem->base, rmem->size, &src_perms,
				    data->vmperms, data->vmcount);

	}

	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP and CDSP1 */
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	case CDSP_DOMAIN_ID:
	case CDSP1_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
		if (err)
			goto fdev_error;

		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
		if (err)
			goto populate_error;
		break;
	default:
		err = -EINVAL;
		goto fdev_error;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	rdev->dma_mask = &data->dma_mask;
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
	if (err)
		goto populate_error;

	return 0;

populate_error:
	if (data->fdevice)
		misc_deregister(&data->fdevice->miscdev);
	if (data->secure_fdevice)
		misc_deregister(&data->secure_fdevice->miscdev);

fdev_error:
	kfree(data);
	return err;
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node) {
		ctx->retval = -EPIPE;
		complete(&ctx->work);
	}
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_buf *buf, *b;
	struct fastrpc_user *user;
	unsigned long flags;

	/* No invocations past this point */
	spin_lock_irqsave(&cctx->lock, flags);
	cctx->rpdev = NULL;
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (cctx->fdevice)
		misc_deregister(&cctx->fdevice->miscdev);

	if (cctx->secure_fdevice)
		misc_deregister(&cctx->secure_fdevice->miscdev);

	list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
		list_del(&buf->node);

	if (cctx->remote_heap)
		fastrpc_buf_free(cctx->remote_heap);

	of_platform_depopulate(&rpdev->dev);

	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_DESCRIPTION("Qualcomm FastRPC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);