/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/mutex.h>

#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>

/* NOTE(review): project headers restored — the code below references
 * MDP_BASE, msm_fb types, etc., which these headers declare. */
#include "mdp.h"
#include "msm_fb.h"
#ifdef CONFIG_FB_MSM_MDP40
#include "mdp4.h"
#endif
44 static struct clk
*mdp_clk
;
45 static struct clk
*mdp_pclk
;
47 struct completion mdp_ppp_comp
;
48 struct semaphore mdp_ppp_mutex
;
49 struct semaphore mdp_pipe_ctrl_mutex
;
51 unsigned long mdp_timer_duration
= (HZ
); /* 1 sec */
52 /* unsigned long mdp_mdp_timer_duration=0; */
54 boolean mdp_ppp_waiting
= FALSE
;
55 uint32 mdp_tv_underflow_cnt
;
56 uint32 mdp_lcdc_underflow_cnt
;
58 boolean mdp_current_clk_on
= FALSE
;
59 boolean mdp_is_in_isr
= FALSE
;
62 * legacy mdp_in_processing is only for DMA2-MDDI
63 * this applies to DMA2 block only
65 uint32 mdp_in_processing
= FALSE
;
67 #ifdef CONFIG_FB_MSM_MDP40
68 uint32 mdp_intr_mask
= MDP4_ANY_INTR_MASK
;
70 uint32 mdp_intr_mask
= MDP_ANY_INTR_MASK
;
73 MDP_BLOCK_TYPE mdp_debug
[MDP_MAX_BLOCK
];
75 int32 mdp_block_power_cnt
[MDP_MAX_BLOCK
];
77 spinlock_t mdp_spin_lock
;
78 struct workqueue_struct
*mdp_dma_wq
; /*mdp dma wq */
79 struct workqueue_struct
*mdp_vsync_wq
; /*mdp vsync wq */
81 static struct workqueue_struct
*mdp_pipe_ctrl_wq
; /* mdp mdp pipe ctrl wq */
82 static struct delayed_work mdp_pipe_ctrl_worker
;
84 #ifdef CONFIG_FB_MSM_MDP40
85 struct mdp_dma_data dma2_data
;
86 struct mdp_dma_data dma_s_data
;
87 struct mdp_dma_data dma_e_data
;
89 static struct mdp_dma_data dma2_data
;
90 static struct mdp_dma_data dma_s_data
;
91 static struct mdp_dma_data dma_e_data
;
93 static struct mdp_dma_data dma3_data
;
95 extern ktime_t mdp_dma2_last_update_time
;
97 extern uint32 mdp_dma2_update_time_in_usec
;
98 extern int mdp_lcd_rd_cnt_offset_slow
;
99 extern int mdp_lcd_rd_cnt_offset_fast
;
100 extern int mdp_usec_diff_threshold
;
102 #ifdef CONFIG_FB_MSM_LCDC
103 extern int mdp_lcdc_pclk_clk_rate
;
104 extern int mdp_lcdc_pad_pclk_clk_rate
;
105 extern int first_pixel_start_x
;
106 extern int first_pixel_start_y
;
109 #ifdef MSM_FB_ENABLE_DBGFS
110 struct dentry
*mdp_dir
;
113 #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
114 static int mdp_suspend(struct platform_device
*pdev
, pm_message_t state
);
116 #define mdp_suspend NULL
119 struct timeval mdp_dma2_timeval
;
120 struct timeval mdp_ppp_timeval
;
122 #ifdef CONFIG_HAS_EARLYSUSPEND
123 static struct early_suspend early_suspend
;
126 #ifndef CONFIG_FB_MSM_MDP22
127 DEFINE_MUTEX(mdp_lut_push_sem
);
128 static int mdp_lut_i
;
129 static int mdp_lut_hw_update(struct fb_cmap
*cmap
)
139 for (i
= 0; i
< cmap
->len
; i
++) {
140 if (copy_from_user(&r
, cmap
->red
++, sizeof(r
)) ||
141 copy_from_user(&g
, cmap
->green
++, sizeof(g
)) ||
142 copy_from_user(&b
, cmap
->blue
++, sizeof(b
)))
145 #ifdef CONFIG_FB_MSM_MDP40
146 MDP_OUTP(MDP_BASE
+ 0x94800 +
148 MDP_OUTP(MDP_BASE
+ 0x93800 +
150 (0x400*mdp_lut_i
) + cmap
->start
*4 + i
*4,
153 ((r
& 0xff) << 16)));
/* set when a non-LCDC LUT update is pending; consumed at DMA2 kickoff */
static int mdp_lut_push;
static int mdp_lut_push_i;
161 static int mdp_lut_update_nonlcdc(struct fb_info
*info
, struct fb_cmap
*cmap
)
165 mdp_pipe_ctrl(MDP_CMD_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
166 ret
= mdp_lut_hw_update(cmap
);
167 mdp_pipe_ctrl(MDP_CMD_BLOCK
, MDP_BLOCK_POWER_OFF
, FALSE
);
172 mutex_lock(&mdp_lut_push_sem
);
174 mdp_lut_push_i
= mdp_lut_i
;
175 mutex_unlock(&mdp_lut_push_sem
);
177 mdp_lut_i
= (mdp_lut_i
+ 1)%2;
182 static int mdp_lut_update_lcdc(struct fb_info
*info
, struct fb_cmap
*cmap
)
186 mdp_pipe_ctrl(MDP_CMD_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
187 ret
= mdp_lut_hw_update(cmap
);
190 mdp_pipe_ctrl(MDP_CMD_BLOCK
, MDP_BLOCK_POWER_OFF
, FALSE
);
194 MDP_OUTP(MDP_BASE
+ 0x90070, (mdp_lut_i
<< 10) | 0x17);
195 mdp_pipe_ctrl(MDP_CMD_BLOCK
, MDP_BLOCK_POWER_OFF
, FALSE
);
196 mdp_lut_i
= (mdp_lut_i
+ 1)%2;
201 #define MDP_HIST_MAX_BIN 32
202 static __u32 mdp_hist_r
[MDP_HIST_MAX_BIN
];
203 static __u32 mdp_hist_g
[MDP_HIST_MAX_BIN
];
204 static __u32 mdp_hist_b
[MDP_HIST_MAX_BIN
];
206 #ifdef CONFIG_FB_MSM_MDP40
207 struct mdp_histogram mdp_hist
;
208 struct completion mdp_hist_comp
;
210 static struct mdp_histogram mdp_hist
;
211 static struct completion mdp_hist_comp
;
214 static int mdp_do_histogram(struct fb_info
*info
, struct mdp_histogram
*hist
)
218 if (!hist
->frame_cnt
|| (hist
->bin_cnt
== 0) ||
219 (hist
->bin_cnt
> MDP_HIST_MAX_BIN
))
222 INIT_COMPLETION(mdp_hist_comp
);
224 mdp_hist
.bin_cnt
= hist
->bin_cnt
;
225 mdp_hist
.r
= (hist
->r
) ? mdp_hist_r
: 0;
226 mdp_hist
.g
= (hist
->g
) ? mdp_hist_g
: 0;
227 mdp_hist
.b
= (hist
->b
) ? mdp_hist_b
: 0;
229 #ifdef CONFIG_FB_MSM_MDP40
230 MDP_OUTP(MDP_BASE
+ 0x95004, hist
->frame_cnt
);
231 MDP_OUTP(MDP_BASE
+ 0x95000, 1);
233 MDP_OUTP(MDP_BASE
+ 0x94004, hist
->frame_cnt
);
234 MDP_OUTP(MDP_BASE
+ 0x94000, 1);
236 wait_for_completion_killable(&mdp_hist_comp
);
239 ret
= copy_to_user(hist
->r
, mdp_hist
.r
, hist
->bin_cnt
*4);
244 ret
= copy_to_user(hist
->g
, mdp_hist
.g
, hist
->bin_cnt
*4);
249 ret
= copy_to_user(hist
->b
, mdp_hist
.b
, hist
->bin_cnt
*4);
256 printk(KERN_ERR
"%s: invalid hist buffer\n", __func__
);
261 /* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
263 int mdp_ppp_pipe_wait(void)
267 /* wait 5 seconds for the operation to complete before declaring
270 if (mdp_ppp_waiting
== TRUE
) {
271 ret
= wait_for_completion_interruptible_timeout(&mdp_ppp_comp
,
275 printk(KERN_ERR
"%s: Timed out waiting for the MDP.\n",
/* protects mdp_irq_mask / mdp_irq_enabled */
static DEFINE_SPINLOCK(mdp_lock);
static int mdp_irq_mask;
static int mdp_irq_enabled;
286 void mdp_enable_irq(uint32 term
)
288 unsigned long irq_flags
;
290 spin_lock_irqsave(&mdp_lock
, irq_flags
);
291 if (mdp_irq_mask
& term
) {
292 printk(KERN_ERR
"MDP IRQ term-0x%x is already set\n", term
);
294 mdp_irq_mask
|= term
;
295 if (mdp_irq_mask
&& !mdp_irq_enabled
) {
300 spin_unlock_irqrestore(&mdp_lock
, irq_flags
);
303 void mdp_disable_irq(uint32 term
)
305 unsigned long irq_flags
;
307 spin_lock_irqsave(&mdp_lock
, irq_flags
);
308 if (!(mdp_irq_mask
& term
)) {
309 printk(KERN_ERR
"MDP IRQ term-0x%x is not set\n", term
);
311 mdp_irq_mask
&= ~term
;
312 if (!mdp_irq_mask
&& mdp_irq_enabled
) {
314 disable_irq(INT_MDP
);
317 spin_unlock_irqrestore(&mdp_lock
, irq_flags
);
320 void mdp_disable_irq_nolock(uint32 term
)
323 if (!(mdp_irq_mask
& term
)) {
324 printk(KERN_ERR
"MDP IRQ term-0x%x is not set\n", term
);
326 mdp_irq_mask
&= ~term
;
327 if (!mdp_irq_mask
&& mdp_irq_enabled
) {
329 disable_irq(INT_MDP
);
334 void mdp_pipe_kickoff(uint32 term
, struct msm_fb_data_type
*mfd
)
337 dmb(); /* memory barrier */
339 /* kick off PPP engine */
340 if (term
== MDP_PPP_TERM
) {
341 if (mdp_debug
[MDP_PPP_BLOCK
])
342 jiffies_to_timeval(jiffies
, &mdp_ppp_timeval
);
344 /* let's turn on PPP block */
345 mdp_pipe_ctrl(MDP_PPP_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
347 mdp_enable_irq(term
);
348 INIT_COMPLETION(mdp_ppp_comp
);
349 mdp_ppp_waiting
= TRUE
;
350 outpdw(MDP_BASE
+ 0x30, 0x1000);
351 wait_for_completion_killable(&mdp_ppp_comp
);
352 mdp_disable_irq(term
);
354 if (mdp_debug
[MDP_PPP_BLOCK
]) {
357 jiffies_to_timeval(jiffies
, &now
);
358 mdp_ppp_timeval
.tv_usec
=
359 now
.tv_usec
- mdp_ppp_timeval
.tv_usec
;
360 MSM_FB_INFO("MDP-PPP: %d\n",
361 (int)mdp_ppp_timeval
.tv_usec
);
363 } else if (term
== MDP_DMA2_TERM
) {
364 if (mdp_debug
[MDP_DMA2_BLOCK
]) {
365 MSM_FB_INFO("MDP-DMA2: %d\n",
366 (int)mdp_dma2_timeval
.tv_usec
);
367 jiffies_to_timeval(jiffies
, &mdp_dma2_timeval
);
369 /* DMA update timestamp */
370 mdp_dma2_last_update_time
= ktime_get_real();
371 /* let's turn on DMA2 block */
372 #ifdef CONFIG_FB_MSM_MDP22
373 outpdw(MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0044, 0x0);/* start DMA */
376 mutex_lock(&mdp_lut_push_sem
);
378 MDP_OUTP(MDP_BASE
+ 0x90070,
379 (mdp_lut_push_i
<< 10) | 0x17);
380 mutex_unlock(&mdp_lut_push_sem
);
382 #ifdef CONFIG_FB_MSM_MDP40
383 outpdw(MDP_BASE
+ 0x000c, 0x0); /* start DMA */
385 outpdw(MDP_BASE
+ 0x0044, 0x0); /* start DMA */
388 #ifdef CONFIG_FB_MSM_MDP40
389 } else if (term
== MDP_DMA_S_TERM
) {
390 mdp_pipe_ctrl(MDP_DMA_S_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
391 outpdw(MDP_BASE
+ 0x0010, 0x0); /* start DMA */
392 } else if (term
== MDP_DMA_E_TERM
) {
393 mdp_pipe_ctrl(MDP_DMA_E_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
394 outpdw(MDP_BASE
+ 0x0014, 0x0); /* start DMA */
395 } else if (term
== MDP_OVERLAY0_TERM
) {
396 mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
397 outpdw(MDP_BASE
+ 0x0004, 0);
398 } else if (term
== MDP_OVERLAY1_TERM
) {
399 mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
400 outpdw(MDP_BASE
+ 0x0008, 0);
403 } else if (term
== MDP_DMA_S_TERM
) {
404 mdp_pipe_ctrl(MDP_DMA_S_BLOCK
, MDP_BLOCK_POWER_ON
, FALSE
);
405 outpdw(MDP_BASE
+ 0x0048, 0x0); /* start DMA */
410 static void mdp_pipe_ctrl_workqueue_handler(struct work_struct
*work
)
412 mdp_pipe_ctrl(MDP_MASTER_BLOCK
, MDP_BLOCK_POWER_OFF
, FALSE
);
415 void mdp_pipe_ctrl(MDP_BLOCK_TYPE block
, MDP_BLOCK_POWER_STATE state
,
418 boolean mdp_all_blocks_off
= TRUE
;
422 spin_lock_irqsave(&mdp_spin_lock
, flag
);
423 if (MDP_BLOCK_POWER_ON
== state
) {
424 mdp_block_power_cnt
[block
]++;
426 if (MDP_DMA2_BLOCK
== block
)
427 mdp_in_processing
= TRUE
;
429 mdp_block_power_cnt
[block
]--;
431 if (mdp_block_power_cnt
[block
] < 0) {
433 * Master has to serve a request to power off MDP always
434 * It also has a timer to power off. So, in case of
435 * timer expires first and DMA2 finishes later,
436 * master has to power off two times
437 * There shouldn't be multiple power-off request for
440 if (block
!= MDP_MASTER_BLOCK
) {
441 MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
442 multiple power-off request\n", block
);
444 mdp_block_power_cnt
[block
] = 0;
447 if (MDP_DMA2_BLOCK
== block
)
448 mdp_in_processing
= FALSE
;
450 spin_unlock_irqrestore(&mdp_spin_lock
, flag
);
453 * If it's in isr, we send our request to workqueue.
454 * Otherwise, processing happens in the current context
457 /* checking all blocks power state */
458 for (i
= 0; i
< MDP_MAX_BLOCK
; i
++) {
459 if (mdp_block_power_cnt
[i
] > 0)
460 mdp_all_blocks_off
= FALSE
;
463 if ((mdp_all_blocks_off
) && (mdp_current_clk_on
)) {
464 /* send workqueue to turn off mdp power */
465 queue_delayed_work(mdp_pipe_ctrl_wq
,
466 &mdp_pipe_ctrl_worker
,
470 down(&mdp_pipe_ctrl_mutex
);
471 /* checking all blocks power state */
472 for (i
= 0; i
< MDP_MAX_BLOCK
; i
++) {
473 if (mdp_block_power_cnt
[i
] > 0)
474 mdp_all_blocks_off
= FALSE
;
478 * find out whether a delayable work item is currently
482 if (delayed_work_pending(&mdp_pipe_ctrl_worker
)) {
484 * try to cancel the current work if it fails to
485 * stop (which means del_timer can't delete it
486 * from the list, it's about to expire and run),
487 * we have to let it run. queue_delayed_work won't
488 * accept the next job which is same as
489 * queue_delayed_work(mdp_timer_duration = 0)
491 cancel_delayed_work(&mdp_pipe_ctrl_worker
);
494 if ((mdp_all_blocks_off
) && (mdp_current_clk_on
)) {
495 if (block
== MDP_MASTER_BLOCK
) {
496 mdp_current_clk_on
= FALSE
;
497 /* turn off MDP clks */
498 if (mdp_clk
!= NULL
) {
499 clk_disable(mdp_clk
);
500 MSM_FB_DEBUG("MDP CLK OFF\n");
502 if (mdp_pclk
!= NULL
) {
503 clk_disable(mdp_pclk
);
504 MSM_FB_DEBUG("MDP PCLK OFF\n");
507 /* send workqueue to turn off mdp power */
508 queue_delayed_work(mdp_pipe_ctrl_wq
,
509 &mdp_pipe_ctrl_worker
,
512 } else if ((!mdp_all_blocks_off
) && (!mdp_current_clk_on
)) {
513 mdp_current_clk_on
= TRUE
;
514 /* turn on MDP clks */
515 if (mdp_clk
!= NULL
) {
517 MSM_FB_DEBUG("MDP CLK ON\n");
519 if (mdp_pclk
!= NULL
) {
520 clk_enable(mdp_pclk
);
521 MSM_FB_DEBUG("MDP PCLK ON\n");
524 up(&mdp_pipe_ctrl_mutex
);
528 #ifndef CONFIG_FB_MSM_MDP40
529 irqreturn_t
mdp_isr(int irq
, void *ptr
)
531 uint32 mdp_interrupt
= 0;
532 struct mdp_dma_data
*dma
;
534 mdp_is_in_isr
= TRUE
;
536 mdp_interrupt
= inp32(MDP_INTR_STATUS
);
537 outp32(MDP_INTR_CLEAR
, mdp_interrupt
);
539 mdp_interrupt
&= mdp_intr_mask
;
541 if (mdp_interrupt
& TV_ENC_UNDERRUN
) {
542 mdp_interrupt
&= ~(TV_ENC_UNDERRUN
);
543 mdp_tv_underflow_cnt
++;
549 /* DMA3 TV-Out Start */
550 if (mdp_interrupt
& TV_OUT_DMA3_START
) {
551 /* let's disable TV out interrupt */
552 mdp_intr_mask
&= ~TV_OUT_DMA3_START
;
553 outp32(MDP_INTR_ENABLE
, mdp_intr_mask
);
557 dma
->waiting
= FALSE
;
558 complete(&dma
->comp
);
561 #ifndef CONFIG_FB_MSM_MDP22
562 if (mdp_interrupt
& MDP_HIST_DONE
) {
563 outp32(MDP_BASE
+ 0x94018, 0x3);
564 outp32(MDP_INTR_CLEAR
, MDP_HIST_DONE
);
566 memcpy(mdp_hist
.r
, MDP_BASE
+ 0x94100,
569 memcpy(mdp_hist
.g
, MDP_BASE
+ 0x94200,
572 memcpy(mdp_hist
.b
, MDP_BASE
+ 0x94300,
574 complete(&mdp_hist_comp
);
578 if (mdp_interrupt
& LCDC_UNDERFLOW
) {
579 mdp_lcdc_underflow_cnt
++;
581 /* LCDC Frame Start */
582 if (mdp_interrupt
& LCDC_FRAME_START
) {
583 /* let's disable LCDC interrupt */
584 mdp_intr_mask
&= ~LCDC_FRAME_START
;
585 outp32(MDP_INTR_ENABLE
, mdp_intr_mask
);
589 dma
->waiting
= FALSE
;
590 complete(&dma
->comp
);
594 /* DMA2 LCD-Out Complete */
595 if (mdp_interrupt
& MDP_DMA_S_DONE
) {
598 mdp_pipe_ctrl(MDP_DMA_S_BLOCK
, MDP_BLOCK_POWER_OFF
,
600 complete(&dma
->comp
);
604 /* DMA2 LCD-Out Complete */
605 if (mdp_interrupt
& MDP_DMA_P_DONE
) {
609 now_k
= ktime_get_real();
610 mdp_dma2_last_update_time
.tv
.sec
=
611 now_k
.tv
.sec
- mdp_dma2_last_update_time
.tv
.sec
;
612 mdp_dma2_last_update_time
.tv
.nsec
=
613 now_k
.tv
.nsec
- mdp_dma2_last_update_time
.tv
.nsec
;
615 if (mdp_debug
[MDP_DMA2_BLOCK
]) {
616 jiffies_to_timeval(jiffies
, &now
);
617 mdp_dma2_timeval
.tv_usec
=
618 now
.tv_usec
- mdp_dma2_timeval
.tv_usec
;
623 mdp_pipe_ctrl(MDP_DMA2_BLOCK
, MDP_BLOCK_POWER_OFF
,
625 complete(&dma
->comp
);
628 if (mdp_interrupt
& MDP_PPP_DONE
) {
629 #ifdef CONFIG_MDP_PPP_ASYNC_OP
632 mdp_pipe_ctrl(MDP_PPP_BLOCK
,
633 MDP_BLOCK_POWER_OFF
, TRUE
);
634 if (mdp_ppp_waiting
) {
635 mdp_ppp_waiting
= FALSE
;
636 complete(&mdp_ppp_comp
);
642 mdp_is_in_isr
= FALSE
;
648 static void mdp_drv_init(void)
652 for (i
= 0; i
< MDP_MAX_BLOCK
; i
++) {
656 /* initialize spin lock and workqueue */
657 spin_lock_init(&mdp_spin_lock
);
658 mdp_dma_wq
= create_singlethread_workqueue("mdp_dma_wq");
659 mdp_vsync_wq
= create_singlethread_workqueue("mdp_vsync_wq");
660 mdp_pipe_ctrl_wq
= create_singlethread_workqueue("mdp_pipe_ctrl_wq");
661 INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker
,
662 mdp_pipe_ctrl_workqueue_handler
);
663 #ifdef CONFIG_MDP_PPP_ASYNC_OP
667 /* initialize semaphore */
668 init_completion(&mdp_ppp_comp
);
669 init_MUTEX(&mdp_ppp_mutex
);
670 init_MUTEX(&mdp_pipe_ctrl_mutex
);
672 dma2_data
.busy
= FALSE
;
673 dma2_data
.waiting
= FALSE
;
674 init_completion(&dma2_data
.comp
);
675 init_MUTEX(&dma2_data
.mutex
);
676 mutex_init(&dma2_data
.ov_mutex
);
678 dma3_data
.busy
= FALSE
;
679 dma3_data
.waiting
= FALSE
;
680 init_completion(&dma3_data
.comp
);
681 init_MUTEX(&dma3_data
.mutex
);
683 dma_s_data
.busy
= FALSE
;
684 dma_s_data
.waiting
= FALSE
;
685 init_completion(&dma_s_data
.comp
);
686 init_MUTEX(&dma_s_data
.mutex
);
688 dma_e_data
.busy
= FALSE
;
689 dma_e_data
.waiting
= FALSE
;
690 init_completion(&dma_e_data
.comp
);
692 #ifndef CONFIG_FB_MSM_MDP22
693 init_completion(&mdp_hist_comp
);
696 /* initializing mdp power block counter to 0 */
697 for (i
= 0; i
< MDP_MAX_BLOCK
; i
++) {
698 mdp_block_power_cnt
[i
] = 0;
701 #ifdef MSM_FB_ENABLE_DBGFS
704 char sub_name
[] = "mdp";
706 root
= msm_fb_get_debugfs_root();
708 mdp_dir
= debugfs_create_dir(sub_name
, root
);
711 msm_fb_debugfs_file_create(mdp_dir
,
712 "dma2_update_time_in_usec",
713 (u32
*) &mdp_dma2_update_time_in_usec
);
714 msm_fb_debugfs_file_create(mdp_dir
,
716 (u32
*) &mdp_lcd_rd_cnt_offset_slow
);
717 msm_fb_debugfs_file_create(mdp_dir
,
719 (u32
*) &mdp_lcd_rd_cnt_offset_fast
);
720 msm_fb_debugfs_file_create(mdp_dir
,
721 "mdp_usec_diff_threshold",
722 (u32
*) &mdp_usec_diff_threshold
);
723 msm_fb_debugfs_file_create(mdp_dir
,
724 "mdp_current_clk_on",
725 (u32
*) &mdp_current_clk_on
);
726 #ifdef CONFIG_FB_MSM_LCDC
727 msm_fb_debugfs_file_create(mdp_dir
,
729 (u32
*) &first_pixel_start_x
);
730 msm_fb_debugfs_file_create(mdp_dir
,
732 (u32
*) &first_pixel_start_y
);
733 msm_fb_debugfs_file_create(mdp_dir
,
734 "mdp_lcdc_pclk_clk_rate",
735 (u32
*) &mdp_lcdc_pclk_clk_rate
);
736 msm_fb_debugfs_file_create(mdp_dir
,
737 "mdp_lcdc_pad_pclk_clk_rate",
738 (u32
*) &mdp_lcdc_pad_pclk_clk_rate
);
746 static int mdp_probe(struct platform_device
*pdev
);
747 static int mdp_remove(struct platform_device
*pdev
);
749 static struct platform_driver mdp_driver
= {
751 .remove
= mdp_remove
,
752 #ifndef CONFIG_HAS_EARLYSUSPEND
753 .suspend
= mdp_suspend
,
759 * Driver name must match the device name added in
766 static int mdp_off(struct platform_device
*pdev
)
771 struct msm_fb_data_type
*mfd
= platform_get_drvdata(pdev
);
774 ret
= panel_next_off(pdev
);
777 mdp_hw_vsync_clk_disable(mfd
);
783 static int mdp_on(struct platform_device
*pdev
)
786 struct msm_fb_data_type
*mfd
= platform_get_drvdata(pdev
);
792 mdp_hw_vsync_clk_enable(mfd
);
795 ret
= panel_next_on(pdev
);
800 static int mdp_irq_clk_setup(void)
804 #ifdef CONFIG_FB_MSM_MDP40
805 ret
= request_irq(INT_MDP
, mdp4_isr
, IRQF_DISABLED
, "MDP", 0);
807 ret
= request_irq(INT_MDP
, mdp_isr
, IRQF_DISABLED
, "MDP", 0);
810 printk(KERN_ERR
"mdp request_irq() failed!\n");
813 disable_irq(INT_MDP
);
815 mdp_clk
= clk_get(NULL
, "mdp_clk");
817 if (IS_ERR(mdp_clk
)) {
818 ret
= PTR_ERR(mdp_clk
);
819 printk(KERN_ERR
"can't get mdp_clk error:%d!\n", ret
);
820 free_irq(INT_MDP
, 0);
824 mdp_pclk
= clk_get(NULL
, "mdp_pclk");
825 if (IS_ERR(mdp_pclk
))
829 #ifdef CONFIG_FB_MSM_MDP40
831 * mdp_clk should greater than mdp_pclk always
833 clk_set_rate(mdp_clk
, 122880000); /* 122.88 Mhz */
834 printk(KERN_INFO
"mdp_clk: mdp_clk=%d mdp_pclk=%d\n",
835 (int)clk_get_rate(mdp_clk
), (int)clk_get_rate(mdp_pclk
));
841 static struct platform_device
*pdev_list
[MSM_FB_MAX_DEV_LIST
];
842 static int pdev_list_cnt
;
843 static int mdp_resource_initialized
;
844 static struct msm_panel_common_pdata
*mdp_pdata
;
846 static int mdp_probe(struct platform_device
*pdev
)
848 struct platform_device
*msm_fb_dev
= NULL
;
849 struct msm_fb_data_type
*mfd
;
850 struct msm_fb_panel_data
*pdata
= NULL
;
852 resource_size_t size
;
853 #ifdef CONFIG_FB_MSM_MDP40
859 if ((pdev
->id
== 0) && (pdev
->num_resources
> 0)) {
860 mdp_pdata
= pdev
->dev
.platform_data
;
862 size
= resource_size(&pdev
->resource
[0]);
863 msm_mdp_base
= ioremap(pdev
->resource
[0].start
, size
);
865 MSM_FB_INFO("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
866 (int)pdev
->resource
[0].start
, (int)msm_mdp_base
);
868 if (unlikely(!msm_mdp_base
))
871 printk("irq clk setup\n");
872 rc
= mdp_irq_clk_setup();
873 printk("irq clk setup done\n");
877 /* initializing mdp hw */
878 #ifdef CONFIG_FB_MSM_MDP40
884 mdp_resource_initialized
= 1;
888 if (!mdp_resource_initialized
)
891 mfd
= platform_get_drvdata(pdev
);
896 if (mfd
->key
!= MFD_KEY
)
899 if (pdev_list_cnt
>= MSM_FB_MAX_DEV_LIST
)
902 msm_fb_dev
= platform_device_alloc("msm_fb", pdev
->id
);
906 /* link to the latest pdev */
907 mfd
->pdev
= msm_fb_dev
;
910 if (platform_device_add_data
911 (msm_fb_dev
, pdev
->dev
.platform_data
,
912 sizeof(struct msm_fb_panel_data
))) {
913 printk(KERN_ERR
"mdp_probe: platform_device_add_data failed!\n");
918 pdata
= msm_fb_dev
->dev
.platform_data
;
920 pdata
->off
= mdp_off
;
923 switch (mfd
->panel
.type
) {
927 INIT_WORK(&mfd
->dma_update_worker
,
928 mdp_lcd_update_workqueue_handler
);
929 INIT_WORK(&mfd
->vsync_resync_worker
,
930 mdp_vsync_resync_workqueue_handler
);
931 mfd
->hw_refresh
= FALSE
;
933 if (mfd
->panel
.type
== EXT_MDDI_PANEL
) {
934 /* 15 fps -> 66 msec */
935 mfd
->refresh_timer_duration
= (66 * HZ
/ 1000);
937 /* 24 fps -> 42 msec */
938 mfd
->refresh_timer_duration
= (42 * HZ
/ 1000);
941 #ifdef CONFIG_FB_MSM_MDP22
942 mfd
->dma_fnc
= mdp_dma2_update
;
943 mfd
->dma
= &dma2_data
;
945 if (mfd
->panel_info
.pdest
== DISPLAY_1
) {
946 #ifdef CONFIG_FB_MSM_OVERLAY
947 mfd
->dma_fnc
= mdp4_mddi_overlay
;
949 mfd
->dma_fnc
= mdp_dma2_update
;
951 mfd
->dma
= &dma2_data
;
952 mfd
->lut_update
= mdp_lut_update_nonlcdc
;
953 mfd
->do_histogram
= mdp_do_histogram
;
955 mfd
->dma_fnc
= mdp_dma_s_update
;
956 mfd
->dma
= &dma_s_data
;
960 mfd
->vsync_gpio
= mdp_pdata
->gpio
;
962 mfd
->vsync_gpio
= -1;
964 #ifdef CONFIG_FB_MSM_MDP40
965 if (mfd
->panel
.type
== EBI2_PANEL
)
970 if (mfd
->panel_info
.pdest
== DISPLAY_1
)
971 if_no
= PRIMARY_INTF_SEL
;
973 if_no
= SECONDARY_INTF_SEL
;
975 mdp4_display_intf_sel(if_no
, intf
);
977 mdp_config_vsync(mfd
);
982 pdata
->on
= mdp_lcdc_on
;
983 pdata
->off
= mdp_lcdc_off
;
984 mfd
->hw_refresh
= TRUE
;
985 mfd
->cursor_update
= mdp_hw_cursor_update
;
986 #ifndef CONFIG_FB_MSM_MDP22
987 mfd
->lut_update
= mdp_lut_update_lcdc
;
988 mfd
->do_histogram
= mdp_do_histogram
;
990 #ifdef CONFIG_FB_MSM_OVERLAY
991 mfd
->dma_fnc
= mdp4_lcdc_overlay
;
993 mfd
->dma_fnc
= mdp_lcdc_update
;
996 #ifdef CONFIG_FB_MSM_MDP40
997 if (mfd
->panel
.type
== HDMI_PANEL
) {
998 mfd
->dma
= &dma_e_data
;
999 mdp4_display_intf_sel(EXTERNAL_INTF_SEL
, LCDC_RGB_INTF
);
1001 mfd
->dma
= &dma2_data
;
1002 mdp4_display_intf_sel(PRIMARY_INTF_SEL
, LCDC_RGB_INTF
);
1005 mfd
->dma
= &dma2_data
;
1006 spin_lock_irqsave(&mdp_spin_lock
, flag
);
1007 mdp_intr_mask
&= ~MDP_DMA_P_DONE
;
1008 outp32(MDP_INTR_ENABLE
, mdp_intr_mask
);
1009 spin_unlock_irqrestore(&mdp_spin_lock
, flag
);
1014 pdata
->on
= mdp_dma3_on
;
1015 pdata
->off
= mdp_dma3_off
;
1016 mfd
->hw_refresh
= TRUE
;
1017 mfd
->dma_fnc
= mdp_dma3_update
;
1018 mfd
->dma
= &dma3_data
;
1022 printk(KERN_ERR
"mdp_probe: unknown device type!\n");
1027 /* set driver data */
1028 platform_set_drvdata(msm_fb_dev
, mfd
);
1030 rc
= platform_device_add(msm_fb_dev
);
1035 pdev_list
[pdev_list_cnt
++] = pdev
;
1039 platform_device_put(msm_fb_dev
);
1043 static void mdp_suspend_sub(void)
1045 /* cancel pipe ctrl worker */
1046 cancel_delayed_work(&mdp_pipe_ctrl_worker
);
1048 /* for workder can't be cancelled... */
1049 flush_workqueue(mdp_pipe_ctrl_wq
);
1051 /* let's wait for PPP completion */
1052 while (mdp_block_power_cnt
[MDP_PPP_BLOCK
] > 0) ;
1054 /* try to power down */
1055 mdp_pipe_ctrl(MDP_MASTER_BLOCK
, MDP_BLOCK_POWER_OFF
, FALSE
);
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
/* Legacy PM suspend hook (only when early-suspend is not in use). */
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	mdp_suspend_sub();
	return 0;
}
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Android early-suspend hook — shares the common suspend path. */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
}
#endif
1073 static int mdp_remove(struct platform_device
*pdev
)
1075 iounmap(msm_mdp_base
);
1079 static int mdp_register_driver(void)
1081 #ifdef CONFIG_HAS_EARLYSUSPEND
1082 early_suspend
.level
= EARLY_SUSPEND_LEVEL_DISABLE_FB
- 1;
1083 early_suspend
.suspend
= mdp_early_suspend
;
1084 register_early_suspend(&early_suspend
);
1087 return platform_driver_register(&mdp_driver
);
1090 static int __init
mdp_driver_init(void)
1096 ret
= mdp_register_driver();
1098 printk(KERN_ERR
"mdp_register_driver() failed!\n");
1102 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDP40)
1103 mdp4_debugfs_init();
1110 module_init(mdp_driver_init
);