2 * Copyright (C) 2008-2009 QUALCOMM Incorporated.
5 //FIXME: most allocations need not be GFP_ATOMIC
6 /* FIXME: management of mutexes */
7 /* FIXME: msm_pmem_region_lookup return values */
8 /* FIXME: way too many copy to/from user */
9 /* FIXME: does region->active mean free */
10 /* FIXME: check limits on command lengths passed from userspace */
11 /* FIXME: __msm_release: which queues should we flush when opencnt != 0 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <mach/board.h>
19 #include <linux/list.h>
20 #include <linux/uaccess.h>
21 #include <linux/android_pmem.h>
22 #include <linux/poll.h>
23 #include <media/msm_camera.h>
24 #include <mach/camera.h>
26 #define MSM_MAX_CAMERA_SENSORS 5
28 #define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
29 __func__, __LINE__, ((to) ? "to" : "from"))
30 #define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
31 #define ERR_COPY_TO_USER() ERR_USER_COPY(1)
33 static struct class *msm_class
;
34 static dev_t msm_devno
;
35 static LIST_HEAD(msm_sensors
);
37 #define __CONTAINS(r, v, l, field) ({ \
40 typeof(v) __e = __v + l; \
41 int res = __v >= __r->field && \
42 __e <= __r->field + __r->len; \
46 #define CONTAINS(r1, r2, field) ({ \
47 typeof(r2) __r2 = r2; \
48 __CONTAINS(r1, __r2->field, __r2->len, field); \
51 #define IN_RANGE(r, v, field) ({ \
54 int res = ((__vv >= __r->field) && \
55 (__vv < (__r->field + __r->len))); \
59 #define OVERLAPS(r1, r2, field) ({ \
60 typeof(r1) __r1 = r1; \
61 typeof(r2) __r2 = r2; \
62 typeof(__r2->field) __v = __r2->field; \
63 typeof(__v) __e = __v + __r2->len - 1; \
64 int res = (IN_RANGE(__r1, __v, field) || \
65 IN_RANGE(__r1, __e, field)); \
69 #define MSM_DRAIN_QUEUE_NOSYNC(sync, name) do { \
70 struct msm_queue_cmd *qcmd = NULL; \
71 CDBG("%s: draining queue "#name"\n", __func__); \
72 while (!list_empty(&(sync)->name)) { \
73 qcmd = list_first_entry(&(sync)->name, \
74 struct msm_queue_cmd, list); \
75 list_del_init(&qcmd->list); \
80 #define MSM_DRAIN_QUEUE(sync, name) do { \
81 unsigned long flags; \
82 spin_lock_irqsave(&(sync)->name##_lock, flags); \
83 MSM_DRAIN_QUEUE_NOSYNC(sync, name); \
84 spin_unlock_irqrestore(&(sync)->name##_lock, flags); \
87 static int check_overlap(struct hlist_head
*ptype
,
91 struct msm_pmem_region
*region
;
92 struct msm_pmem_region t
= { .paddr
= paddr
, .len
= len
};
93 struct hlist_node
*node
;
95 hlist_for_each_entry(region
, node
, ptype
, list
) {
96 if (CONTAINS(region
, &t
, paddr
) ||
97 CONTAINS(&t
, region
, paddr
) ||
98 OVERLAPS(region
, &t
, paddr
)) {
100 " region (PHYS %p len %ld)"
101 " clashes with registered region"
102 " (paddr %p len %ld)\n",
103 (void *)t
.paddr
, t
.len
,
104 (void *)region
->paddr
, region
->len
);
112 static int msm_pmem_table_add(struct hlist_head
*ptype
,
113 struct msm_pmem_info
*info
)
117 unsigned long vstart
;
120 struct msm_pmem_region
*region
;
122 rc
= get_pmem_file(info
->fd
, &paddr
, &vstart
, &len
, &file
);
124 pr_err("msm_pmem_table_add: get_pmem_file fd %d error %d\n",
129 if (check_overlap(ptype
, paddr
, len
) < 0)
132 CDBG("%s: type = %d, paddr = 0x%lx, vaddr = 0x%lx\n",
134 info
->type
, paddr
, (unsigned long)info
->vaddr
);
136 region
= kmalloc(sizeof(*region
), GFP_KERNEL
);
140 INIT_HLIST_NODE(®ion
->list
);
142 region
->type
= info
->type
;
143 region
->vaddr
= info
->vaddr
;
144 region
->paddr
= paddr
;
147 region
->y_off
= info
->y_off
;
148 region
->cbcr_off
= info
->cbcr_off
;
149 region
->fd
= info
->fd
;
150 region
->active
= info
->active
;
152 hlist_add_head(&(region
->list
), ptype
);
157 /* return of 0 means failure */
158 static uint8_t msm_pmem_region_lookup(struct hlist_head
*ptype
,
159 int pmem_type
, struct msm_pmem_region
*reg
, uint8_t maxcount
)
161 struct msm_pmem_region
*region
;
162 struct msm_pmem_region
*regptr
;
163 struct hlist_node
*node
, *n
;
169 hlist_for_each_entry_safe(region
, node
, n
, ptype
, list
) {
170 if (region
->type
== pmem_type
&& region
->active
) {
182 static unsigned long msm_pmem_frame_ptov_lookup(struct msm_sync
*sync
,
183 unsigned long pyaddr
,
184 unsigned long pcbcraddr
,
185 uint32_t *yoff
, uint32_t *cbcroff
, int *fd
)
187 struct msm_pmem_region
*region
;
188 struct hlist_node
*node
, *n
;
190 hlist_for_each_entry_safe(region
, node
, n
, &sync
->frame
, list
) {
191 if (pyaddr
== (region
->paddr
+ region
->y_off
) &&
192 pcbcraddr
== (region
->paddr
+
195 /* offset since we could pass vaddr inside
196 * a registered pmem buffer
198 *yoff
= region
->y_off
;
199 *cbcroff
= region
->cbcr_off
;
202 return (unsigned long)(region
->vaddr
);
209 static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync
*sync
,
210 unsigned long addr
, int *fd
)
212 struct msm_pmem_region
*region
;
213 struct hlist_node
*node
, *n
;
215 hlist_for_each_entry_safe(region
, node
, n
, &sync
->stats
, list
) {
216 if (addr
== region
->paddr
&& region
->active
) {
217 /* offset since we could pass vaddr inside a
218 * registered pmem buffer */
221 return (unsigned long)(region
->vaddr
);
228 static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync
*sync
,
229 unsigned long buffer
,
230 uint32_t yoff
, uint32_t cbcroff
, int fd
)
232 struct msm_pmem_region
*region
;
233 struct hlist_node
*node
, *n
;
235 hlist_for_each_entry_safe(region
,
236 node
, n
, &sync
->frame
, list
) {
237 if (((unsigned long)(region
->vaddr
) == buffer
) &&
238 (region
->y_off
== yoff
) &&
239 (region
->cbcr_off
== cbcroff
) &&
240 (region
->fd
== fd
) &&
241 (region
->active
== 0)) {
244 return region
->paddr
;
251 static unsigned long msm_pmem_stats_vtop_lookup(
252 struct msm_sync
*sync
,
253 unsigned long buffer
,
256 struct msm_pmem_region
*region
;
257 struct hlist_node
*node
, *n
;
259 hlist_for_each_entry_safe(region
, node
, n
, &sync
->stats
, list
) {
260 if (((unsigned long)(region
->vaddr
) == buffer
) &&
261 (region
->fd
== fd
) && region
->active
== 0) {
263 return region
->paddr
;
270 static int __msm_pmem_table_del(struct msm_sync
*sync
,
271 struct msm_pmem_info
*pinfo
)
274 struct msm_pmem_region
*region
;
275 struct hlist_node
*node
, *n
;
277 switch (pinfo
->type
) {
278 case MSM_PMEM_OUTPUT1
:
279 case MSM_PMEM_OUTPUT2
:
280 case MSM_PMEM_THUMBAIL
:
281 case MSM_PMEM_MAINIMG
:
282 case MSM_PMEM_RAW_MAINIMG
:
283 hlist_for_each_entry_safe(region
, node
, n
,
284 &sync
->frame
, list
) {
286 if (pinfo
->type
== region
->type
&&
287 pinfo
->vaddr
== region
->vaddr
&&
288 pinfo
->fd
== region
->fd
) {
290 put_pmem_file(region
->file
);
296 case MSM_PMEM_AEC_AWB
:
298 hlist_for_each_entry_safe(region
, node
, n
,
299 &sync
->stats
, list
) {
301 if (pinfo
->type
== region
->type
&&
302 pinfo
->vaddr
== region
->vaddr
&&
303 pinfo
->fd
== region
->fd
) {
305 put_pmem_file(region
->file
);
319 static int msm_pmem_table_del(struct msm_sync
*sync
, void __user
*arg
)
321 struct msm_pmem_info info
;
323 if (copy_from_user(&info
, arg
, sizeof(info
))) {
324 ERR_COPY_FROM_USER();
328 return __msm_pmem_table_del(sync
, &info
);
331 static int __msm_get_frame(struct msm_sync
*sync
,
332 struct msm_frame
*frame
)
337 struct msm_queue_cmd
*qcmd
= NULL
;
338 struct msm_vfe_phy_info
*pphy
;
340 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
341 if (!list_empty(&sync
->prev_frame_q
)) {
342 qcmd
= list_first_entry(&sync
->prev_frame_q
,
343 struct msm_queue_cmd
, list
);
344 list_del_init(&qcmd
->list
);
346 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
349 pr_err("%s: no preview frame.\n", __func__
);
353 pphy
= (struct msm_vfe_phy_info
*)(qcmd
->command
);
356 msm_pmem_frame_ptov_lookup(sync
,
358 pphy
->cbcr_phy
, &(frame
->y_off
),
359 &(frame
->cbcr_off
), &(frame
->fd
));
360 if (!frame
->buffer
) {
361 pr_err("%s: cannot get frame, invalid lookup address "
362 "y=%x cbcr=%x offset=%d\n",
370 CDBG("__msm_get_frame: y=0x%x, cbcr=0x%x, qcmd=0x%x, virt_addr=0x%x\n",
371 pphy
->y_phy
, pphy
->cbcr_phy
, (int) qcmd
, (int) frame
->buffer
);
377 static int msm_get_frame(struct msm_sync
*sync
, void __user
*arg
)
380 struct msm_frame frame
;
382 if (copy_from_user(&frame
,
384 sizeof(struct msm_frame
))) {
385 ERR_COPY_FROM_USER();
389 rc
= __msm_get_frame(sync
, &frame
);
394 if (frame
.croplen
> sync
->croplen
) {
395 pr_err("msm_get_frame: invalid frame croplen %d\n",
400 if (copy_to_user((void *)frame
.cropinfo
,
408 if (copy_to_user((void *)arg
,
409 &frame
, sizeof(struct msm_frame
))) {
414 CDBG("Got frame!!!\n");
419 static int msm_enable_vfe(struct msm_sync
*sync
, void __user
*arg
)
422 struct camera_enable_cmd cfg
;
424 if (copy_from_user(&cfg
,
426 sizeof(struct camera_enable_cmd
))) {
427 ERR_COPY_FROM_USER();
431 if (sync
->vfefn
.vfe_enable
)
432 rc
= sync
->vfefn
.vfe_enable(&cfg
);
434 CDBG("msm_enable_vfe: returned rc = %d\n", rc
);
438 static int msm_disable_vfe(struct msm_sync
*sync
, void __user
*arg
)
441 struct camera_enable_cmd cfg
;
443 if (copy_from_user(&cfg
,
445 sizeof(struct camera_enable_cmd
))) {
446 ERR_COPY_FROM_USER();
450 if (sync
->vfefn
.vfe_disable
)
451 rc
= sync
->vfefn
.vfe_disable(&cfg
, NULL
);
453 CDBG("msm_disable_vfe: returned rc = %d\n", rc
);
457 static struct msm_queue_cmd
* __msm_control(struct msm_sync
*sync
,
458 struct msm_control_device_queue
*queue
,
459 struct msm_queue_cmd
*qcmd
,
465 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
466 list_add_tail(&qcmd
->list
, &sync
->msg_event_q
);
467 /* wake up config thread */
468 wake_up(&sync
->msg_event_wait
);
469 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
474 /* wait for config status */
475 rc
= wait_event_interruptible_timeout(
476 queue
->ctrl_status_wait
,
477 !list_empty_careful(&queue
->ctrl_status_q
),
479 if (list_empty_careful(&queue
->ctrl_status_q
)) {
483 pr_err("msm_control: wait_event error %d\n", rc
);
485 /* This is a bit scary. If we time out too early, we
486 * will free qcmd at the end of this function, and the
487 * dsp may do the same when it does respond, so we
488 * remove the message from the source queue.
490 pr_err("%s: error waiting for ctrl_status_q: %d\n",
492 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
493 list_del_init(&qcmd
->list
);
494 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
500 /* control command status is ready */
501 spin_lock_irqsave(&queue
->ctrl_status_q_lock
, flags
);
502 BUG_ON(list_empty(&queue
->ctrl_status_q
));
503 qcmd
= list_first_entry(&queue
->ctrl_status_q
,
504 struct msm_queue_cmd
, list
);
505 list_del_init(&qcmd
->list
);
506 spin_unlock_irqrestore(&queue
->ctrl_status_q_lock
, flags
);
511 static int msm_control(struct msm_control_device
*ctrl_pmsm
,
517 struct msm_sync
*sync
= ctrl_pmsm
->pmsm
->sync
;
518 struct msm_ctrl_cmd udata
, *ctrlcmd
;
519 struct msm_queue_cmd
*qcmd
= NULL
, *qcmd_temp
;
521 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
522 ERR_COPY_FROM_USER();
527 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
528 sizeof(struct msm_ctrl_cmd
) + udata
.length
,
531 pr_err("msm_control: cannot allocate buffer\n");
536 qcmd
->type
= MSM_CAM_Q_CTRL
;
537 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
539 ctrlcmd
->value
= ctrlcmd
+ 1;
542 if (copy_from_user(ctrlcmd
->value
,
543 udata
.value
, udata
.length
)) {
544 ERR_COPY_FROM_USER();
551 /* qcmd will be set to NULL */
552 qcmd
= __msm_control(sync
, NULL
, qcmd
, 0);
556 qcmd_temp
= __msm_control(sync
,
558 qcmd
, MAX_SCHEDULE_TIMEOUT
);
560 if (IS_ERR(qcmd_temp
)) {
561 rc
= PTR_ERR(qcmd_temp
);
567 void __user
*to
= udata
.value
;
568 udata
= *(struct msm_ctrl_cmd
*)qcmd
->command
;
569 if (udata
.length
> 0) {
580 if (copy_to_user((void *)arg
, &udata
,
581 sizeof(struct msm_ctrl_cmd
))) {
589 /* Note: if we get here as a result of an error, we will free the
590 * qcmd that we kmalloc() in this function. When we come here as
591 * a result of a successful completion, we are freeing the qcmd that
592 * we dequeued from queue->ctrl_status_q.
597 CDBG("msm_control: end rc = %d\n", rc
);
601 static int msm_get_stats(struct msm_sync
*sync
, void __user
*arg
)
607 struct msm_stats_event_ctrl se
;
609 struct msm_queue_cmd
*qcmd
= NULL
;
610 struct msm_ctrl_cmd
*ctrl
= NULL
;
611 struct msm_vfe_resp
*data
= NULL
;
612 struct msm_stats_buf stats
;
614 if (copy_from_user(&se
, arg
,
615 sizeof(struct msm_stats_event_ctrl
))) {
616 ERR_COPY_FROM_USER();
620 timeout
= (int)se
.timeout_ms
;
622 CDBG("msm_get_stats timeout %d\n", timeout
);
623 rc
= wait_event_interruptible_timeout(
624 sync
->msg_event_wait
,
625 !list_empty_careful(&sync
->msg_event_q
),
626 msecs_to_jiffies(timeout
));
627 if (list_empty_careful(&sync
->msg_event_q
)) {
631 pr_err("msm_get_stats error %d\n", rc
);
635 CDBG("msm_get_stats returned from wait: %d\n", rc
);
637 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
638 BUG_ON(list_empty(&sync
->msg_event_q
));
639 qcmd
= list_first_entry(&sync
->msg_event_q
,
640 struct msm_queue_cmd
, list
);
641 list_del_init(&qcmd
->list
);
642 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
644 CDBG("=== received from DSP === %d\n", qcmd
->type
);
646 switch (qcmd
->type
) {
647 case MSM_CAM_Q_VFE_EVT
:
648 case MSM_CAM_Q_VFE_MSG
:
649 data
= (struct msm_vfe_resp
*)(qcmd
->command
);
651 /* adsp event and message */
652 se
.resptype
= MSM_CAM_RESP_STAT_EVT_MSG
;
654 /* 0 - msg from aDSP, 1 - event from mARM */
655 se
.stats_event
.type
= data
->evt_msg
.type
;
656 se
.stats_event
.msg_id
= data
->evt_msg
.msg_id
;
657 se
.stats_event
.len
= data
->evt_msg
.len
;
659 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
660 CDBG("length = %d\n", se
.stats_event
.len
);
661 CDBG("msg_id = %d\n", se
.stats_event
.msg_id
);
663 if ((data
->type
== VFE_MSG_STATS_AF
) ||
664 (data
->type
== VFE_MSG_STATS_WE
)) {
667 msm_pmem_stats_ptov_lookup(sync
,
671 pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
677 if (copy_to_user((void *)(se
.stats_event
.data
),
679 sizeof(struct msm_stats_buf
))) {
684 } else if ((data
->evt_msg
.len
> 0) &&
685 (data
->type
== VFE_MSG_GENERAL
)) {
686 if (copy_to_user((void *)(se
.stats_event
.data
),
688 data
->evt_msg
.len
)) {
692 } else if (data
->type
== VFE_MSG_OUTPUT1
||
693 data
->type
== VFE_MSG_OUTPUT2
) {
694 if (copy_to_user((void *)(se
.stats_event
.data
),
700 } else if (data
->type
== VFE_MSG_SNAPSHOT
&& sync
->pict_pp
) {
701 struct msm_postproc buf
;
702 struct msm_pmem_region region
;
703 buf
.fmnum
= msm_pmem_region_lookup(&sync
->frame
,
706 if (buf
.fmnum
== 1) {
707 buf
.fmain
.buffer
= (unsigned long)region
.vaddr
;
708 buf
.fmain
.y_off
= region
.y_off
;
709 buf
.fmain
.cbcr_off
= region
.cbcr_off
;
710 buf
.fmain
.fd
= region
.fd
;
712 buf
.fmnum
= msm_pmem_region_lookup(&sync
->frame
,
713 MSM_PMEM_RAW_MAINIMG
,
715 if (buf
.fmnum
== 1) {
716 buf
.fmain
.path
= MSM_FRAME_PREV_2
;
718 (unsigned long)region
.vaddr
;
719 buf
.fmain
.fd
= region
.fd
;
722 pr_err("%s: pmem lookup failed\n",
728 if (copy_to_user((void *)(se
.stats_event
.data
), &buf
,
734 CDBG("snapshot copy_to_user!\n");
739 /* control command from control thread */
740 ctrl
= (struct msm_ctrl_cmd
*)(qcmd
->command
);
742 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
743 CDBG("length = %d\n", ctrl
->length
);
745 if (ctrl
->length
> 0) {
746 if (copy_to_user((void *)(se
.ctrl_cmd
.value
),
755 se
.resptype
= MSM_CAM_RESP_CTRL
;
757 /* what to control */
758 se
.ctrl_cmd
.type
= ctrl
->type
;
759 se
.ctrl_cmd
.length
= ctrl
->length
;
760 se
.ctrl_cmd
.resp_fd
= ctrl
->resp_fd
;
763 case MSM_CAM_Q_V4L2_REQ
:
764 /* control command from v4l2 client */
765 ctrl
= (struct msm_ctrl_cmd
*)(qcmd
->command
);
767 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
768 CDBG("length = %d\n", ctrl
->length
);
770 if (ctrl
->length
> 0) {
771 if (copy_to_user((void *)(se
.ctrl_cmd
.value
),
772 ctrl
->value
, ctrl
->length
)) {
779 /* 2 tells config thread this is v4l2 request */
780 se
.resptype
= MSM_CAM_RESP_V4L2
;
782 /* what to control */
783 se
.ctrl_cmd
.type
= ctrl
->type
;
784 se
.ctrl_cmd
.length
= ctrl
->length
;
790 } /* switch qcmd->type */
792 if (copy_to_user((void *)arg
, &se
, sizeof(se
))) {
801 CDBG("msm_get_stats: %d\n", rc
);
805 static int msm_ctrl_cmd_done(struct msm_control_device
*ctrl_pmsm
,
811 struct msm_ctrl_cmd udata
, *ctrlcmd
;
812 struct msm_queue_cmd
*qcmd
= NULL
;
814 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
815 ERR_COPY_FROM_USER();
820 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
821 sizeof(struct msm_ctrl_cmd
) + udata
.length
,
828 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
830 if (udata
.length
> 0) {
831 ctrlcmd
->value
= ctrlcmd
+ 1;
832 if (copy_from_user(ctrlcmd
->value
,
835 ERR_COPY_FROM_USER();
841 else ctrlcmd
->value
= NULL
;
844 CDBG("msm_ctrl_cmd_done: end rc = %d\n", rc
);
846 /* wake up control thread */
847 spin_lock_irqsave(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
, flags
);
848 list_add_tail(&qcmd
->list
, &ctrl_pmsm
->ctrl_q
.ctrl_status_q
);
849 wake_up(&ctrl_pmsm
->ctrl_q
.ctrl_status_wait
);
850 spin_unlock_irqrestore(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
, flags
);
856 static int msm_config_vfe(struct msm_sync
*sync
, void __user
*arg
)
858 struct msm_vfe_cfg_cmd cfgcmd
;
859 struct msm_pmem_region region
[8];
860 struct axidata axi_data
;
864 memset(&axi_data
, 0, sizeof(axi_data
));
866 if (copy_from_user(&cfgcmd
, arg
, sizeof(cfgcmd
))) {
867 ERR_COPY_FROM_USER();
871 switch(cfgcmd
.cmd_type
) {
872 case CMD_STATS_ENABLE
:
874 msm_pmem_region_lookup(&sync
->stats
,
875 MSM_PMEM_AEC_AWB
, ®ion
[0],
876 NUM_WB_EXP_STAT_OUTPUT_BUFFERS
);
877 if (!axi_data
.bufnum1
) {
878 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
881 axi_data
.region
= ®ion
[0];
884 case CMD_STATS_AF_ENABLE
:
886 msm_pmem_region_lookup(&sync
->stats
,
887 MSM_PMEM_AF
, ®ion
[0],
888 NUM_AF_STAT_OUTPUT_BUFFERS
);
889 if (!axi_data
.bufnum1
) {
890 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
893 axi_data
.region
= ®ion
[0];
897 case CMD_STATS_DISABLE
:
900 pr_err("%s: unknown command type %d\n",
901 __FUNCTION__
, cfgcmd
.cmd_type
);
906 if (sync
->vfefn
.vfe_config
)
907 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, data
);
912 static int msm_frame_axi_cfg(struct msm_sync
*sync
,
913 struct msm_vfe_cfg_cmd
*cfgcmd
)
916 struct axidata axi_data
;
917 void *data
= &axi_data
;
918 struct msm_pmem_region region
[8];
921 memset(&axi_data
, 0, sizeof(axi_data
));
923 switch (cfgcmd
->cmd_type
) {
924 case CMD_AXI_CFG_OUT1
:
925 pmem_type
= MSM_PMEM_OUTPUT1
;
927 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
929 if (!axi_data
.bufnum1
) {
930 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
935 case CMD_AXI_CFG_OUT2
:
936 pmem_type
= MSM_PMEM_OUTPUT2
;
938 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
940 if (!axi_data
.bufnum2
) {
941 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
946 case CMD_AXI_CFG_SNAP_O1_AND_O2
:
947 pmem_type
= MSM_PMEM_THUMBAIL
;
949 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
951 if (!axi_data
.bufnum1
) {
952 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
956 pmem_type
= MSM_PMEM_MAINIMG
;
958 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
959 ®ion
[axi_data
.bufnum1
], 8);
960 if (!axi_data
.bufnum2
) {
961 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
966 case CMD_RAW_PICT_AXI_CFG
:
967 pmem_type
= MSM_PMEM_RAW_MAINIMG
;
969 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
971 if (!axi_data
.bufnum2
) {
972 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
982 pr_err("%s: unknown command type %d\n",
983 __FUNCTION__
, cfgcmd
->cmd_type
);
987 axi_data
.region
= ®ion
[0];
989 /* send the AXI configuration command to driver */
990 if (sync
->vfefn
.vfe_config
)
991 rc
= sync
->vfefn
.vfe_config(cfgcmd
, data
);
996 static int msm_get_sensor_info(struct msm_sync
*sync
, void __user
*arg
)
999 struct msm_camsensor_info info
;
1000 struct msm_camera_sensor_info
*sdata
;
1002 if (copy_from_user(&info
,
1004 sizeof(struct msm_camsensor_info
))) {
1005 ERR_COPY_FROM_USER();
1009 sdata
= sync
->pdev
->dev
.platform_data
;
1010 CDBG("sensor_name %s\n", sdata
->sensor_name
);
1012 memcpy(&info
.name
[0],
1015 info
.flash_enabled
= sdata
->flash_type
!= MSM_CAMERA_FLASH_NONE
;
1017 /* copy back to user space */
1018 if (copy_to_user((void *)arg
,
1020 sizeof(struct msm_camsensor_info
))) {
1028 static int __msm_put_frame_buf(struct msm_sync
*sync
,
1029 struct msm_frame
*pb
)
1032 struct msm_vfe_cfg_cmd cfgcmd
;
1036 pphy
= msm_pmem_frame_vtop_lookup(sync
,
1038 pb
->y_off
, pb
->cbcr_off
, pb
->fd
);
1041 CDBG("rel: vaddr = 0x%lx, paddr = 0x%lx\n",
1043 cfgcmd
.cmd_type
= CMD_FRAME_BUF_RELEASE
;
1044 cfgcmd
.value
= (void *)pb
;
1045 if (sync
->vfefn
.vfe_config
)
1046 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, &pphy
);
1048 pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
1056 static int msm_put_frame_buffer(struct msm_sync
*sync
, void __user
*arg
)
1058 struct msm_frame buf_t
;
1060 if (copy_from_user(&buf_t
,
1062 sizeof(struct msm_frame
))) {
1063 ERR_COPY_FROM_USER();
1067 return __msm_put_frame_buf(sync
, &buf_t
);
1070 static int __msm_register_pmem(struct msm_sync
*sync
,
1071 struct msm_pmem_info
*pinfo
)
1075 switch (pinfo
->type
) {
1076 case MSM_PMEM_OUTPUT1
:
1077 case MSM_PMEM_OUTPUT2
:
1078 case MSM_PMEM_THUMBAIL
:
1079 case MSM_PMEM_MAINIMG
:
1080 case MSM_PMEM_RAW_MAINIMG
:
1081 rc
= msm_pmem_table_add(&sync
->frame
, pinfo
);
1084 case MSM_PMEM_AEC_AWB
:
1086 rc
= msm_pmem_table_add(&sync
->stats
, pinfo
);
1097 static int msm_register_pmem(struct msm_sync
*sync
, void __user
*arg
)
1099 struct msm_pmem_info info
;
1101 if (copy_from_user(&info
, arg
, sizeof(info
))) {
1102 ERR_COPY_FROM_USER();
1106 return __msm_register_pmem(sync
, &info
);
1109 static int msm_stats_axi_cfg(struct msm_sync
*sync
,
1110 struct msm_vfe_cfg_cmd
*cfgcmd
)
1113 struct axidata axi_data
;
1114 void *data
= &axi_data
;
1116 struct msm_pmem_region region
[3];
1117 int pmem_type
= MSM_PMEM_MAX
;
1119 memset(&axi_data
, 0, sizeof(axi_data
));
1121 switch (cfgcmd
->cmd_type
) {
1122 case CMD_STATS_AXI_CFG
:
1123 pmem_type
= MSM_PMEM_AEC_AWB
;
1125 case CMD_STATS_AF_AXI_CFG
:
1126 pmem_type
= MSM_PMEM_AF
;
1132 pr_err("%s: unknown command type %d\n",
1133 __FUNCTION__
, cfgcmd
->cmd_type
);
1137 if (cfgcmd
->cmd_type
!= CMD_GENERAL
) {
1139 msm_pmem_region_lookup(&sync
->stats
, pmem_type
,
1140 ®ion
[0], NUM_WB_EXP_STAT_OUTPUT_BUFFERS
);
1141 if (!axi_data
.bufnum1
) {
1142 pr_err("%s: pmem region lookup error\n", __FUNCTION__
);
1145 axi_data
.region
= ®ion
[0];
1148 /* send the AEC/AWB STATS configuration command to driver */
1149 if (sync
->vfefn
.vfe_config
)
1150 rc
= sync
->vfefn
.vfe_config(cfgcmd
, &axi_data
);
1155 static int msm_put_stats_buffer(struct msm_sync
*sync
, void __user
*arg
)
1159 struct msm_stats_buf buf
;
1161 struct msm_vfe_cfg_cmd cfgcmd
;
1163 if (copy_from_user(&buf
, arg
,
1164 sizeof(struct msm_stats_buf
))) {
1165 ERR_COPY_FROM_USER();
1169 CDBG("msm_put_stats_buffer\n");
1170 pphy
= msm_pmem_stats_vtop_lookup(sync
, buf
.buffer
, buf
.fd
);
1173 if (buf
.type
== STAT_AEAW
)
1174 cfgcmd
.cmd_type
= CMD_STATS_BUF_RELEASE
;
1175 else if (buf
.type
== STAT_AF
)
1176 cfgcmd
.cmd_type
= CMD_STATS_AF_BUF_RELEASE
;
1178 pr_err("%s: invalid buf type %d\n",
1185 cfgcmd
.value
= (void *)&buf
;
1187 if (sync
->vfefn
.vfe_config
) {
1188 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, &pphy
);
1190 pr_err("msm_put_stats_buffer: "\
1191 "vfe_config err %d\n", rc
);
1193 pr_err("msm_put_stats_buffer: vfe_config is NULL\n");
1195 pr_err("msm_put_stats_buffer: NULL physical address\n");
1203 static int msm_axi_config(struct msm_sync
*sync
, void __user
*arg
)
1205 struct msm_vfe_cfg_cmd cfgcmd
;
1207 if (copy_from_user(&cfgcmd
, arg
, sizeof(cfgcmd
))) {
1208 ERR_COPY_FROM_USER();
1212 switch (cfgcmd
.cmd_type
) {
1213 case CMD_AXI_CFG_OUT1
:
1214 case CMD_AXI_CFG_OUT2
:
1215 case CMD_AXI_CFG_SNAP_O1_AND_O2
:
1216 case CMD_RAW_PICT_AXI_CFG
:
1217 return msm_frame_axi_cfg(sync
, &cfgcmd
);
1219 case CMD_STATS_AXI_CFG
:
1220 case CMD_STATS_AF_AXI_CFG
:
1221 return msm_stats_axi_cfg(sync
, &cfgcmd
);
1224 pr_err("%s: unknown command type %d\n",
1233 static int __msm_get_pic(struct msm_sync
*sync
, struct msm_ctrl_cmd
*ctrl
)
1235 unsigned long flags
;
1239 struct msm_queue_cmd
*qcmd
= NULL
;
1241 tm
= (int)ctrl
->timeout_ms
;
1243 rc
= wait_event_interruptible_timeout(
1244 sync
->pict_frame_wait
,
1245 !list_empty_careful(&sync
->pict_frame_q
),
1246 msecs_to_jiffies(tm
));
1247 if (list_empty_careful(&sync
->pict_frame_q
)) {
1251 pr_err("msm_camera_get_picture, rc = %d\n", rc
);
1256 spin_lock_irqsave(&sync
->pict_frame_q_lock
, flags
);
1257 BUG_ON(list_empty(&sync
->pict_frame_q
));
1258 qcmd
= list_first_entry(&sync
->pict_frame_q
,
1259 struct msm_queue_cmd
, list
);
1260 list_del_init(&qcmd
->list
);
1261 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1263 if (qcmd
->command
!= NULL
) {
1264 struct msm_ctrl_cmd
*q
=
1265 (struct msm_ctrl_cmd
*)qcmd
->command
;
1266 ctrl
->type
= q
->type
;
1267 ctrl
->status
= q
->status
;
1277 static int msm_get_pic(struct msm_sync
*sync
, void __user
*arg
)
1279 struct msm_ctrl_cmd ctrlcmd_t
;
1282 if (copy_from_user(&ctrlcmd_t
,
1284 sizeof(struct msm_ctrl_cmd
))) {
1285 ERR_COPY_FROM_USER();
1289 rc
= __msm_get_pic(sync
, &ctrlcmd_t
);
1293 if (sync
->croplen
) {
1294 if (ctrlcmd_t
.length
< sync
->croplen
) {
1295 pr_err("msm_get_pic: invalid len %d\n",
1299 if (copy_to_user(ctrlcmd_t
.value
,
1307 if (copy_to_user((void *)arg
,
1309 sizeof(struct msm_ctrl_cmd
))) {
1316 static int msm_set_crop(struct msm_sync
*sync
, void __user
*arg
)
1318 struct crop_info crop
;
1320 if (copy_from_user(&crop
,
1322 sizeof(struct crop_info
))) {
1323 ERR_COPY_FROM_USER();
1327 if (!sync
->croplen
) {
1328 sync
->cropinfo
= kmalloc(crop
.len
, GFP_KERNEL
);
1329 if (!sync
->cropinfo
)
1331 } else if (sync
->croplen
< crop
.len
)
1334 if (copy_from_user(sync
->cropinfo
,
1337 ERR_COPY_FROM_USER();
1338 kfree(sync
->cropinfo
);
1342 sync
->croplen
= crop
.len
;
1347 static int msm_pict_pp_done(struct msm_sync
*sync
, void __user
*arg
)
1349 struct msm_ctrl_cmd udata
;
1350 struct msm_ctrl_cmd
*ctrlcmd
= NULL
;
1351 struct msm_queue_cmd
*qcmd
= NULL
;
1352 unsigned long flags
;
1358 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
1359 ERR_COPY_FROM_USER();
1364 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
1365 sizeof(struct msm_ctrl_cmd
),
1372 qcmd
->type
= MSM_CAM_Q_VFE_MSG
;
1373 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
1374 memset(ctrlcmd
, 0, sizeof(struct msm_ctrl_cmd
));
1375 ctrlcmd
->type
= udata
.type
;
1376 ctrlcmd
->status
= udata
.status
;
1378 spin_lock_irqsave(&sync
->pict_frame_q_lock
, flags
);
1379 list_add_tail(&qcmd
->list
, &sync
->pict_frame_q
);
1380 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1381 wake_up(&sync
->pict_frame_wait
);
1387 static long msm_ioctl_common(struct msm_device
*pmsm
,
1391 CDBG("msm_ioctl_common\n");
1393 case MSM_CAM_IOCTL_REGISTER_PMEM
:
1394 return msm_register_pmem(pmsm
->sync
, argp
);
1395 case MSM_CAM_IOCTL_UNREGISTER_PMEM
:
1396 return msm_pmem_table_del(pmsm
->sync
, argp
);
1402 static long msm_ioctl_config(struct file
*filep
, unsigned int cmd
,
1406 void __user
*argp
= (void __user
*)arg
;
1407 struct msm_device
*pmsm
= filep
->private_data
;
1409 CDBG("msm_ioctl_config cmd = %d\n", _IOC_NR(cmd
));
1412 case MSM_CAM_IOCTL_GET_SENSOR_INFO
:
1413 rc
= msm_get_sensor_info(pmsm
->sync
, argp
);
1416 case MSM_CAM_IOCTL_CONFIG_VFE
:
1417 /* Coming from config thread for update */
1418 rc
= msm_config_vfe(pmsm
->sync
, argp
);
1421 case MSM_CAM_IOCTL_GET_STATS
:
1422 /* Coming from config thread wait
1423 * for vfe statistics and control requests */
1424 rc
= msm_get_stats(pmsm
->sync
, argp
);
1427 case MSM_CAM_IOCTL_ENABLE_VFE
:
1428 /* This request comes from control thread:
1429 * enable either QCAMTASK or VFETASK */
1430 rc
= msm_enable_vfe(pmsm
->sync
, argp
);
1433 case MSM_CAM_IOCTL_DISABLE_VFE
:
1434 /* This request comes from control thread:
1435 * disable either QCAMTASK or VFETASK */
1436 rc
= msm_disable_vfe(pmsm
->sync
, argp
);
1439 case MSM_CAM_IOCTL_VFE_APPS_RESET
:
1440 msm_camio_vfe_blk_reset();
1444 case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER
:
1445 rc
= msm_put_stats_buffer(pmsm
->sync
, argp
);
1448 case MSM_CAM_IOCTL_AXI_CONFIG
:
1449 rc
= msm_axi_config(pmsm
->sync
, argp
);
1452 case MSM_CAM_IOCTL_SET_CROP
:
1453 rc
= msm_set_crop(pmsm
->sync
, argp
);
1456 case MSM_CAM_IOCTL_PICT_PP
: {
1458 if (copy_from_user(&enable
, argp
, sizeof(enable
))) {
1459 ERR_COPY_FROM_USER();
1462 pmsm
->sync
->pict_pp
= enable
;
1468 case MSM_CAM_IOCTL_PICT_PP_DONE
:
1469 rc
= msm_pict_pp_done(pmsm
->sync
, argp
);
1472 case MSM_CAM_IOCTL_SENSOR_IO_CFG
:
1473 rc
= pmsm
->sync
->sctrl
.s_config(argp
);
1476 case MSM_CAM_IOCTL_FLASH_LED_CFG
: {
1478 if (copy_from_user(&led_state
, argp
, sizeof(led_state
))) {
1479 ERR_COPY_FROM_USER();
1482 rc
= msm_camera_flash_set_led_state(led_state
);
1487 rc
= msm_ioctl_common(pmsm
, cmd
, argp
);
1491 CDBG("msm_ioctl_config cmd = %d DONE\n", _IOC_NR(cmd
));
1495 static int msm_unblock_poll_frame(struct msm_sync
*);
1497 static long msm_ioctl_frame(struct file
*filep
, unsigned int cmd
,
1501 void __user
*argp
= (void __user
*)arg
;
1502 struct msm_device
*pmsm
= filep
->private_data
;
1506 case MSM_CAM_IOCTL_GETFRAME
:
1507 /* Coming from frame thread to get frame
1508 * after SELECT is done */
1509 rc
= msm_get_frame(pmsm
->sync
, argp
);
1511 case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER
:
1512 rc
= msm_put_frame_buffer(pmsm
->sync
, argp
);
1514 case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME
:
1515 rc
= msm_unblock_poll_frame(pmsm
->sync
);
1525 static long msm_ioctl_control(struct file
*filep
, unsigned int cmd
,
1529 void __user
*argp
= (void __user
*)arg
;
1530 struct msm_control_device
*ctrl_pmsm
= filep
->private_data
;
1531 struct msm_device
*pmsm
= ctrl_pmsm
->pmsm
;
1534 case MSM_CAM_IOCTL_CTRL_COMMAND
:
1535 /* Coming from control thread, may need to wait for
1537 rc
= msm_control(ctrl_pmsm
, 1, argp
);
1539 case MSM_CAM_IOCTL_CTRL_COMMAND_2
:
1540 /* Sends a message, returns immediately */
1541 rc
= msm_control(ctrl_pmsm
, 0, argp
);
1543 case MSM_CAM_IOCTL_CTRL_CMD_DONE
:
1544 /* Config thread calls the control thread to notify it
1545 * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND.
1547 rc
= msm_ctrl_cmd_done(ctrl_pmsm
, argp
);
1549 case MSM_CAM_IOCTL_GET_PICTURE
:
1550 rc
= msm_get_pic(pmsm
->sync
, argp
);
1553 rc
= msm_ioctl_common(pmsm
, cmd
, argp
);
1560 static int __msm_release(struct msm_sync
*sync
)
1562 struct msm_pmem_region
*region
;
1563 struct hlist_node
*hnode
;
1564 struct hlist_node
*n
;
1566 mutex_lock(&sync
->lock
);
1570 if (!sync
->opencnt
) {
1571 /* need to clean up system resource */
1572 if (sync
->vfefn
.vfe_release
)
1573 sync
->vfefn
.vfe_release(sync
->pdev
);
1575 if (sync
->cropinfo
) {
1576 kfree(sync
->cropinfo
);
1577 sync
->cropinfo
= NULL
;
1581 hlist_for_each_entry_safe(region
, hnode
, n
,
1582 &sync
->frame
, list
) {
1584 put_pmem_file(region
->file
);
1588 hlist_for_each_entry_safe(region
, hnode
, n
,
1589 &sync
->stats
, list
) {
1591 put_pmem_file(region
->file
);
1595 MSM_DRAIN_QUEUE(sync
, msg_event_q
);
1596 MSM_DRAIN_QUEUE(sync
, prev_frame_q
);
1597 MSM_DRAIN_QUEUE(sync
, pict_frame_q
);
1599 sync
->sctrl
.s_release();
1600 wake_unlock(&sync
->wake_lock
);
1602 sync
->apps_id
= NULL
;
1603 CDBG("msm_release completed!\n");
1605 mutex_unlock(&sync
->lock
);
1610 static int msm_release_config(struct inode
*node
, struct file
*filep
)
1613 struct msm_device
*pmsm
= filep
->private_data
;
1614 printk("msm_camera: RELEASE %s\n", filep
->f_path
.dentry
->d_name
.name
);
1615 rc
= __msm_release(pmsm
->sync
);
1616 atomic_set(&pmsm
->opened
, 0);
1620 static int msm_release_control(struct inode
*node
, struct file
*filep
)
1623 struct msm_control_device
*ctrl_pmsm
= filep
->private_data
;
1624 struct msm_device
*pmsm
= ctrl_pmsm
->pmsm
;
1625 printk("msm_camera: RELEASE %s\n", filep
->f_path
.dentry
->d_name
.name
);
1626 rc
= __msm_release(pmsm
->sync
);
1628 MSM_DRAIN_QUEUE(&ctrl_pmsm
->ctrl_q
, ctrl_status_q
);
1629 MSM_DRAIN_QUEUE(pmsm
->sync
, pict_frame_q
);
1635 static int msm_release_frame(struct inode
*node
, struct file
*filep
)
1638 struct msm_device
*pmsm
= filep
->private_data
;
1639 printk("msm_camera: RELEASE %s\n", filep
->f_path
.dentry
->d_name
.name
);
1640 rc
= __msm_release(pmsm
->sync
);
1642 MSM_DRAIN_QUEUE(pmsm
->sync
, prev_frame_q
);
1643 atomic_set(&pmsm
->opened
, 0);
1648 static int msm_unblock_poll_frame(struct msm_sync
*sync
)
1650 unsigned long flags
;
1651 CDBG("msm_unblock_poll_frame\n");
1652 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1653 sync
->unblock_poll_frame
= 1;
1654 wake_up(&sync
->prev_frame_wait
);
1655 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1659 static unsigned int __msm_poll_frame(struct msm_sync
*sync
,
1661 struct poll_table_struct
*pll_table
)
1664 unsigned long flags
;
1666 poll_wait(filep
, &sync
->prev_frame_wait
, pll_table
);
1668 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1669 if (!list_empty_careful(&sync
->prev_frame_q
))
1671 rc
= POLLIN
| POLLRDNORM
;
1672 if (sync
->unblock_poll_frame
) {
1673 CDBG("%s: sync->unblock_poll_frame is true\n", __func__
);
1675 sync
->unblock_poll_frame
= 0;
1677 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1682 static unsigned int msm_poll_frame(struct file
*filep
,
1683 struct poll_table_struct
*pll_table
)
1685 struct msm_device
*pmsm
= filep
->private_data
;
1686 return __msm_poll_frame(pmsm
->sync
, filep
, pll_table
);
1690 * This function executes in interrupt context.
1693 static void *msm_vfe_sync_alloc(int size
,
1694 void *syncdata
__attribute__((unused
)))
1696 struct msm_queue_cmd
*qcmd
=
1697 kmalloc(sizeof(struct msm_queue_cmd
) + size
, GFP_ATOMIC
);
1698 return qcmd
? qcmd
+ 1 : NULL
;
1702 * This function executes in interrupt context.
1705 static void msm_vfe_sync(struct msm_vfe_resp
*vdata
,
1706 enum msm_queue qtype
, void *syncdata
)
1708 struct msm_queue_cmd
*qcmd
= NULL
;
1709 struct msm_queue_cmd
*qcmd_frame
= NULL
;
1710 struct msm_vfe_phy_info
*fphy
;
1712 unsigned long flags
;
1713 struct msm_sync
*sync
= (struct msm_sync
*)syncdata
;
1715 pr_err("msm_camera: no context in dsp callback.\n");
1719 qcmd
= ((struct msm_queue_cmd
*)vdata
) - 1;
1722 if (qtype
== MSM_CAM_Q_VFE_MSG
) {
1723 switch(vdata
->type
) {
1724 case VFE_MSG_OUTPUT1
:
1725 case VFE_MSG_OUTPUT2
:
1727 kmalloc(sizeof(struct msm_queue_cmd
) +
1728 sizeof(struct msm_vfe_phy_info
),
1732 fphy
= (struct msm_vfe_phy_info
*)(qcmd_frame
+ 1);
1735 qcmd_frame
->type
= MSM_CAM_Q_VFE_MSG
;
1736 qcmd_frame
->command
= fphy
;
1738 CDBG("qcmd_frame= 0x%x phy_y= 0x%x, phy_cbcr= 0x%x\n",
1739 (int) qcmd_frame
, fphy
->y_phy
, fphy
->cbcr_phy
);
1741 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1742 list_add_tail(&qcmd_frame
->list
, &sync
->prev_frame_q
);
1743 wake_up(&sync
->prev_frame_wait
);
1744 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1745 CDBG("woke up frame thread\n");
1747 case VFE_MSG_SNAPSHOT
:
1751 CDBG("snapshot pp = %d\n", sync
->pict_pp
);
1753 kmalloc(sizeof(struct msm_queue_cmd
),
1757 qcmd_frame
->type
= MSM_CAM_Q_VFE_MSG
;
1758 qcmd_frame
->command
= NULL
;
1759 spin_lock_irqsave(&sync
->pict_frame_q_lock
,
1761 list_add_tail(&qcmd_frame
->list
, &sync
->pict_frame_q
);
1762 wake_up(&sync
->pict_frame_wait
);
1763 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1764 CDBG("woke up picture thread\n");
1767 CDBG("%s: qtype = %d not handled\n",
1768 __func__
, vdata
->type
);
1773 qcmd
->command
= (void *)vdata
;
1774 CDBG("vdata->type = %d\n", vdata
->type
);
1776 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
1777 list_add_tail(&qcmd
->list
, &sync
->msg_event_q
);
1778 wake_up(&sync
->msg_event_wait
);
1779 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
1780 CDBG("woke up config thread\n");
1787 static struct msm_vfe_callback msm_vfe_s
= {
1788 .vfe_resp
= msm_vfe_sync
,
1789 .vfe_alloc
= msm_vfe_sync_alloc
,
1792 static int __msm_open(struct msm_sync
*sync
, const char *const apps_id
)
1796 mutex_lock(&sync
->lock
);
1797 if (sync
->apps_id
&& strcmp(sync
->apps_id
, apps_id
)) {
1798 pr_err("msm_camera(%s): sensor %s is already opened for %s\n",
1800 sync
->sdata
->sensor_name
,
1806 sync
->apps_id
= apps_id
;
1808 if (!sync
->opencnt
) {
1809 wake_lock(&sync
->wake_lock
);
1811 msm_camvfe_fn_init(&sync
->vfefn
, sync
);
1812 if (sync
->vfefn
.vfe_init
) {
1813 rc
= sync
->vfefn
.vfe_init(&msm_vfe_s
,
1816 pr_err("vfe_init failed at %d\n", rc
);
1819 rc
= sync
->sctrl
.s_init(sync
->sdata
);
1821 pr_err("sensor init failed: %d\n", rc
);
1825 pr_err("no sensor init func\n");
1831 INIT_HLIST_HEAD(&sync
->frame
);
1832 INIT_HLIST_HEAD(&sync
->stats
);
1833 sync
->unblock_poll_frame
= 0;
1839 mutex_unlock(&sync
->lock
);
1843 static int msm_open_common(struct inode
*inode
, struct file
*filep
,
1847 struct msm_device
*pmsm
=
1848 container_of(inode
->i_cdev
, struct msm_device
, cdev
);
1850 CDBG("msm_camera: open %s\n", filep
->f_path
.dentry
->d_name
.name
);
1852 if (atomic_cmpxchg(&pmsm
->opened
, 0, 1) && once
) {
1853 pr_err("msm_camera: %s is already opened.\n",
1854 filep
->f_path
.dentry
->d_name
.name
);
1858 rc
= nonseekable_open(inode
, filep
);
1860 pr_err("msm_open: nonseekable_open error %d\n", rc
);
1864 rc
= __msm_open(pmsm
->sync
, MSM_APPS_ID_PROP
);
1868 filep
->private_data
= pmsm
;
1870 CDBG("msm_open() open: rc = %d\n", rc
);
/* open() for config/frame nodes: exclusive (once = 1). */
static int msm_open(struct inode *inode, struct file *filep)
{
	return msm_open_common(inode, filep, 1);
}
1879 static int msm_open_control(struct inode
*inode
, struct file
*filep
)
1883 struct msm_control_device
*ctrl_pmsm
=
1884 kmalloc(sizeof(struct msm_control_device
), GFP_KERNEL
);
1888 rc
= msm_open_common(inode
, filep
, 0);
1892 ctrl_pmsm
->pmsm
= filep
->private_data
;
1893 filep
->private_data
= ctrl_pmsm
;
1894 spin_lock_init(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
);
1895 INIT_LIST_HEAD(&ctrl_pmsm
->ctrl_q
.ctrl_status_q
);
1896 init_waitqueue_head(&ctrl_pmsm
->ctrl_q
.ctrl_status_wait
);
1898 CDBG("msm_open() open: rc = %d\n", rc
);
1902 static int __msm_v4l2_control(struct msm_sync
*sync
,
1903 struct msm_ctrl_cmd
*out
)
1907 struct msm_queue_cmd
*qcmd
= NULL
, *rcmd
= NULL
;
1908 struct msm_ctrl_cmd
*ctrl
;
1909 struct msm_control_device_queue FIXME
;
1911 /* wake up config thread, 4 is for V4L2 application */
1912 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
), GFP_KERNEL
);
1914 pr_err("msm_control: cannot allocate buffer\n");
1918 qcmd
->type
= MSM_CAM_Q_V4L2_REQ
;
1919 qcmd
->command
= out
;
1921 rcmd
= __msm_control(sync
, &FIXME
, qcmd
, out
->timeout_ms
);
1927 ctrl
= (struct msm_ctrl_cmd
*)(rcmd
->command
);
1928 /* FIXME: we should just set out->length = ctrl->length; */
1929 BUG_ON(out
->length
< ctrl
->length
);
1930 memcpy(out
->value
, ctrl
->value
, ctrl
->length
);
1933 if (rcmd
) kfree(rcmd
);
1934 CDBG("__msm_v4l2_control: end rc = %d\n", rc
);
1938 static const struct file_operations msm_fops_config
= {
1939 .owner
= THIS_MODULE
,
1941 .unlocked_ioctl
= msm_ioctl_config
,
1942 .release
= msm_release_config
,
1945 static const struct file_operations msm_fops_control
= {
1946 .owner
= THIS_MODULE
,
1947 .open
= msm_open_control
,
1948 .unlocked_ioctl
= msm_ioctl_control
,
1949 .release
= msm_release_control
,
1952 static const struct file_operations msm_fops_frame
= {
1953 .owner
= THIS_MODULE
,
1955 .unlocked_ioctl
= msm_ioctl_frame
,
1956 .release
= msm_release_frame
,
1957 .poll
= msm_poll_frame
,
1960 static int msm_setup_cdev(struct msm_device
*msm
,
1964 const struct file_operations
*fops
)
1968 struct device
*device
=
1969 device_create(msm_class
, NULL
,
1971 "%s%d", suffix
, node
);
1973 if (IS_ERR(device
)) {
1974 rc
= PTR_ERR(device
);
1975 pr_err("msm_camera: error creating device: %d\n", rc
);
1979 cdev_init(&msm
->cdev
, fops
);
1980 msm
->cdev
.owner
= THIS_MODULE
;
1982 rc
= cdev_add(&msm
->cdev
, devno
, 1);
1984 pr_err("msm_camera: error adding cdev: %d\n", rc
);
1985 device_destroy(msm_class
, devno
);
1992 static int msm_tear_down_cdev(struct msm_device
*msm
, dev_t devno
)
1994 cdev_del(&msm
->cdev
);
1995 device_destroy(msm_class
, devno
);
1999 int msm_v4l2_register(struct msm_v4l2_driver
*drv
)
2001 /* FIXME: support multiple sensors */
2002 if (list_empty(&msm_sensors
))
2005 drv
->sync
= list_first_entry(&msm_sensors
, struct msm_sync
, list
);
2006 drv
->open
= __msm_open
;
2007 drv
->release
= __msm_release
;
2008 drv
->ctrl
= __msm_v4l2_control
;
2009 drv
->reg_pmem
= __msm_register_pmem
;
2010 drv
->get_frame
= __msm_get_frame
;
2011 drv
->put_frame
= __msm_put_frame_buf
;
2012 drv
->get_pict
= __msm_get_pic
;
2013 drv
->drv_poll
= __msm_poll_frame
;
2017 EXPORT_SYMBOL(msm_v4l2_register
);
2019 int msm_v4l2_unregister(struct msm_v4l2_driver
*drv
)
2024 EXPORT_SYMBOL(msm_v4l2_unregister
);
2026 static int msm_sync_init(struct msm_sync
*sync
,
2027 struct platform_device
*pdev
,
2028 int (*sensor_probe
)(const struct msm_camera_sensor_info
*,
2029 struct msm_sensor_ctrl
*))
2032 struct msm_sensor_ctrl sctrl
;
2033 sync
->sdata
= pdev
->dev
.platform_data
;
2035 spin_lock_init(&sync
->msg_event_q_lock
);
2036 INIT_LIST_HEAD(&sync
->msg_event_q
);
2037 init_waitqueue_head(&sync
->msg_event_wait
);
2039 spin_lock_init(&sync
->prev_frame_q_lock
);
2040 INIT_LIST_HEAD(&sync
->prev_frame_q
);
2041 init_waitqueue_head(&sync
->prev_frame_wait
);
2043 spin_lock_init(&sync
->pict_frame_q_lock
);
2044 INIT_LIST_HEAD(&sync
->pict_frame_q
);
2045 init_waitqueue_head(&sync
->pict_frame_wait
);
2047 wake_lock_init(&sync
->wake_lock
, WAKE_LOCK_IDLE
, "msm_camera");
2049 rc
= msm_camio_probe_on(pdev
);
2052 rc
= sensor_probe(sync
->sdata
, &sctrl
);
2055 sync
->sctrl
= sctrl
;
2057 msm_camio_probe_off(pdev
);
2059 pr_err("msm_camera: failed to initialize %s\n",
2060 sync
->sdata
->sensor_name
);
2061 wake_lock_destroy(&sync
->wake_lock
);
2066 mutex_init(&sync
->lock
);
2067 CDBG("initialized %s\n", sync
->sdata
->sensor_name
);
2071 static int msm_sync_destroy(struct msm_sync
*sync
)
2073 wake_lock_destroy(&sync
->wake_lock
);
2077 static int msm_device_init(struct msm_device
*pmsm
,
2078 struct msm_sync
*sync
,
2081 int dev_num
= 3 * node
;
2082 int rc
= msm_setup_cdev(pmsm
, node
,
2083 MKDEV(MAJOR(msm_devno
), dev_num
),
2084 "control", &msm_fops_control
);
2086 pr_err("error creating control node: %d\n", rc
);
2090 rc
= msm_setup_cdev(pmsm
+ 1, node
,
2091 MKDEV(MAJOR(msm_devno
), dev_num
+ 1),
2092 "config", &msm_fops_config
);
2094 pr_err("error creating config node: %d\n", rc
);
2095 msm_tear_down_cdev(pmsm
, MKDEV(MAJOR(msm_devno
),
2100 rc
= msm_setup_cdev(pmsm
+ 2, node
,
2101 MKDEV(MAJOR(msm_devno
), dev_num
+ 2),
2102 "frame", &msm_fops_frame
);
2104 pr_err("error creating frame node: %d\n", rc
);
2105 msm_tear_down_cdev(pmsm
,
2106 MKDEV(MAJOR(msm_devno
), dev_num
));
2107 msm_tear_down_cdev(pmsm
+ 1,
2108 MKDEV(MAJOR(msm_devno
), dev_num
+ 1));
2112 atomic_set(&pmsm
[0].opened
, 0);
2113 atomic_set(&pmsm
[1].opened
, 0);
2114 atomic_set(&pmsm
[2].opened
, 0);
2116 pmsm
[0].sync
= sync
;
2117 pmsm
[1].sync
= sync
;
2118 pmsm
[2].sync
= sync
;
2123 int msm_camera_drv_start(struct platform_device
*dev
,
2124 int (*sensor_probe
)(const struct msm_camera_sensor_info
*,
2125 struct msm_sensor_ctrl
*))
2127 struct msm_device
*pmsm
= NULL
;
2128 struct msm_sync
*sync
;
2130 static int camera_node
;
2132 if (camera_node
>= MSM_MAX_CAMERA_SENSORS
) {
2133 pr_err("msm_camera: too many camera sensors\n");
2138 /* There are three device nodes per sensor */
2139 rc
= alloc_chrdev_region(&msm_devno
, 0,
2140 3 * MSM_MAX_CAMERA_SENSORS
,
2143 pr_err("msm_camera: failed to allocate chrdev: %d\n",
2148 msm_class
= class_create(THIS_MODULE
, "msm_camera");
2149 if (IS_ERR(msm_class
)) {
2150 rc
= PTR_ERR(msm_class
);
2151 pr_err("msm_camera: create device class failed: %d\n",
2157 pmsm
= kzalloc(sizeof(struct msm_device
) * 3 +
2158 sizeof(struct msm_sync
), GFP_ATOMIC
);
2161 sync
= (struct msm_sync
*)(pmsm
+ 3);
2163 rc
= msm_sync_init(sync
, dev
, sensor_probe
);
2169 CDBG("setting camera node %d\n", camera_node
);
2170 rc
= msm_device_init(pmsm
, sync
, camera_node
);
2172 msm_sync_destroy(sync
);
2178 list_add(&sync
->list
, &msm_sensors
);
2181 EXPORT_SYMBOL(msm_camera_drv_start
);