GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / msm / mdp.c
blob7cafef6ad01c8899f3b8dbd512b643140be3299c
1 /* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/time.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/hrtimer.h>
26 #include <linux/clk.h>
27 #include <mach/hardware.h>
28 #include <linux/io.h>
29 #include <linux/debugfs.h>
30 #include <linux/delay.h>
31 #include <linux/mutex.h>
33 #include <asm/system.h>
34 #include <asm/mach-types.h>
35 #include <linux/semaphore.h>
36 #include <linux/uaccess.h>
38 #include "mdp.h"
39 #include "msm_fb.h"
40 #ifdef CONFIG_FB_MSM_MDP40
41 #include "mdp4.h"
42 #endif
44 static struct clk *mdp_clk;
45 static struct clk *mdp_pclk;
47 struct completion mdp_ppp_comp;
48 struct semaphore mdp_ppp_mutex;
49 struct semaphore mdp_pipe_ctrl_mutex;
51 unsigned long mdp_timer_duration = (HZ); /* 1 sec */
52 /* unsigned long mdp_mdp_timer_duration=0; */
54 boolean mdp_ppp_waiting = FALSE;
55 uint32 mdp_tv_underflow_cnt;
56 uint32 mdp_lcdc_underflow_cnt;
58 boolean mdp_current_clk_on = FALSE;
59 boolean mdp_is_in_isr = FALSE;
62 * legacy mdp_in_processing is only for DMA2-MDDI
63 * this applies to DMA2 block only
65 uint32 mdp_in_processing = FALSE;
67 #ifdef CONFIG_FB_MSM_MDP40
68 uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
69 #else
70 uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
71 #endif
73 MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
75 int32 mdp_block_power_cnt[MDP_MAX_BLOCK];
77 spinlock_t mdp_spin_lock;
78 struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
79 struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */
81 static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
82 static struct delayed_work mdp_pipe_ctrl_worker;
84 #ifdef CONFIG_FB_MSM_MDP40
85 struct mdp_dma_data dma2_data;
86 struct mdp_dma_data dma_s_data;
87 struct mdp_dma_data dma_e_data;
88 #else
89 static struct mdp_dma_data dma2_data;
90 static struct mdp_dma_data dma_s_data;
91 static struct mdp_dma_data dma_e_data;
92 #endif
93 static struct mdp_dma_data dma3_data;
95 extern ktime_t mdp_dma2_last_update_time;
97 extern uint32 mdp_dma2_update_time_in_usec;
98 extern int mdp_lcd_rd_cnt_offset_slow;
99 extern int mdp_lcd_rd_cnt_offset_fast;
100 extern int mdp_usec_diff_threshold;
102 #ifdef CONFIG_FB_MSM_LCDC
103 extern int mdp_lcdc_pclk_clk_rate;
104 extern int mdp_lcdc_pad_pclk_clk_rate;
105 extern int first_pixel_start_x;
106 extern int first_pixel_start_y;
107 #endif
109 #ifdef MSM_FB_ENABLE_DBGFS
110 struct dentry *mdp_dir;
111 #endif
113 #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
114 static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
115 #else
116 #define mdp_suspend NULL
117 #endif
119 struct timeval mdp_dma2_timeval;
120 struct timeval mdp_ppp_timeval;
122 #ifdef CONFIG_HAS_EARLYSUSPEND
123 static struct early_suspend early_suspend;
124 #endif
126 #ifndef CONFIG_FB_MSM_MDP22
127 DEFINE_MUTEX(mdp_lut_push_sem);
128 static int mdp_lut_i;
129 static int mdp_lut_hw_update(struct fb_cmap *cmap)
131 int i;
132 u16 *c[3];
133 u16 r, g, b;
135 c[0] = cmap->green;
136 c[1] = cmap->blue;
137 c[2] = cmap->red;
139 for (i = 0; i < cmap->len; i++) {
140 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
141 copy_from_user(&g, cmap->green++, sizeof(g)) ||
142 copy_from_user(&b, cmap->blue++, sizeof(b)))
143 return -EFAULT;
145 #ifdef CONFIG_FB_MSM_MDP40
146 MDP_OUTP(MDP_BASE + 0x94800 +
147 #else
148 MDP_OUTP(MDP_BASE + 0x93800 +
149 #endif
150 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
151 ((g & 0xff) |
152 ((b & 0xff) << 8) |
153 ((r & 0xff) << 16)));
156 return 0;
159 static int mdp_lut_push;
160 static int mdp_lut_push_i;
161 static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
163 int ret;
165 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
166 ret = mdp_lut_hw_update(cmap);
167 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
169 if (ret)
170 return ret;
172 mutex_lock(&mdp_lut_push_sem);
173 mdp_lut_push = 1;
174 mdp_lut_push_i = mdp_lut_i;
175 mutex_unlock(&mdp_lut_push_sem);
177 mdp_lut_i = (mdp_lut_i + 1)%2;
179 return 0;
182 static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
184 int ret;
186 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
187 ret = mdp_lut_hw_update(cmap);
189 if (ret) {
190 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
191 return ret;
194 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
195 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
196 mdp_lut_i = (mdp_lut_i + 1)%2;
198 return 0;
201 #define MDP_HIST_MAX_BIN 32
202 static __u32 mdp_hist_r[MDP_HIST_MAX_BIN];
203 static __u32 mdp_hist_g[MDP_HIST_MAX_BIN];
204 static __u32 mdp_hist_b[MDP_HIST_MAX_BIN];
206 #ifdef CONFIG_FB_MSM_MDP40
207 struct mdp_histogram mdp_hist;
208 struct completion mdp_hist_comp;
209 #else
210 static struct mdp_histogram mdp_hist;
211 static struct completion mdp_hist_comp;
212 #endif
214 static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
216 int ret = 0;
218 if (!hist->frame_cnt || (hist->bin_cnt == 0) ||
219 (hist->bin_cnt > MDP_HIST_MAX_BIN))
220 return -EINVAL;
222 INIT_COMPLETION(mdp_hist_comp);
224 mdp_hist.bin_cnt = hist->bin_cnt;
225 mdp_hist.r = (hist->r) ? mdp_hist_r : 0;
226 mdp_hist.g = (hist->g) ? mdp_hist_g : 0;
227 mdp_hist.b = (hist->b) ? mdp_hist_b : 0;
229 #ifdef CONFIG_FB_MSM_MDP40
230 MDP_OUTP(MDP_BASE + 0x95004, hist->frame_cnt);
231 MDP_OUTP(MDP_BASE + 0x95000, 1);
232 #else
233 MDP_OUTP(MDP_BASE + 0x94004, hist->frame_cnt);
234 MDP_OUTP(MDP_BASE + 0x94000, 1);
235 #endif
236 wait_for_completion_killable(&mdp_hist_comp);
238 if (hist->r) {
239 ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4);
240 if (ret)
241 goto hist_err;
243 if (hist->g) {
244 ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4);
245 if (ret)
246 goto hist_err;
248 if (hist->b) {
249 ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4);
250 if (ret)
251 goto hist_err;
253 return 0;
255 hist_err:
256 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
257 return ret;
259 #endif
261 /* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
263 int mdp_ppp_pipe_wait(void)
265 int ret = 1;
267 /* wait 5 seconds for the operation to complete before declaring
268 the MDP hung */
270 if (mdp_ppp_waiting == TRUE) {
271 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
272 5 * HZ);
274 if (!ret)
275 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
276 __func__);
279 return ret;
282 static DEFINE_SPINLOCK(mdp_lock);
283 static int mdp_irq_mask;
284 static int mdp_irq_enabled;
286 void mdp_enable_irq(uint32 term)
288 unsigned long irq_flags;
290 spin_lock_irqsave(&mdp_lock, irq_flags);
291 if (mdp_irq_mask & term) {
292 printk(KERN_ERR "MDP IRQ term-0x%x is already set\n", term);
293 } else {
294 mdp_irq_mask |= term;
295 if (mdp_irq_mask && !mdp_irq_enabled) {
296 mdp_irq_enabled = 1;
297 enable_irq(INT_MDP);
300 spin_unlock_irqrestore(&mdp_lock, irq_flags);
303 void mdp_disable_irq(uint32 term)
305 unsigned long irq_flags;
307 spin_lock_irqsave(&mdp_lock, irq_flags);
308 if (!(mdp_irq_mask & term)) {
309 printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term);
310 } else {
311 mdp_irq_mask &= ~term;
312 if (!mdp_irq_mask && mdp_irq_enabled) {
313 mdp_irq_enabled = 0;
314 disable_irq(INT_MDP);
317 spin_unlock_irqrestore(&mdp_lock, irq_flags);
320 void mdp_disable_irq_nolock(uint32 term)
323 if (!(mdp_irq_mask & term)) {
324 printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term);
325 } else {
326 mdp_irq_mask &= ~term;
327 if (!mdp_irq_mask && mdp_irq_enabled) {
328 mdp_irq_enabled = 0;
329 disable_irq(INT_MDP);
/*
 * Kick off one MDP engine, selected by 'term': PPP blit, DMA2/DMA_S/
 * DMA_E frame transfer, or (MDP4) an overlay pipe.  PPP kickoff is
 * synchronous — this function blocks on mdp_ppp_comp until the ISR
 * signals PPP completion.  For DMA2, a LUT bank staged by
 * mdp_lut_update_nonlcdc() is pushed to hardware just before the DMA
 * start.  'mfd' is not used in this function body; it is kept for the
 * common kickoff signature.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	dmb();	/* memory barrier */

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		/* writing the PPP kickoff register starts the blit */
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* push a pending LUT bank before starting the frame DMA */
		if (mdp_lut_push) {
			mutex_lock(&mdp_lut_push_sem);
			mdp_lut_push = 0;
			MDP_OUTP(MDP_BASE + 0x90070,
					(mdp_lut_push_i << 10) | 0x17);
			mutex_unlock(&mdp_lut_push_sem);
		}
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	}
#endif
}
/*
 * Delayed-work handler: issue the deferred master power-off queued by
 * mdp_pipe_ctrl() once all blocks have been idle for mdp_timer_duration.
 */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * Reference-counted power control for MDP blocks.
 *
 * MDP_BLOCK_POWER_ON increments mdp_block_power_cnt[block];
 * MDP_BLOCK_POWER_OFF decrements it (clamped at zero — only
 * MDP_MASTER_BLOCK may legitimately underflow, see inline comment).
 * When every block's count reaches zero the core clocks are gated:
 * immediately if the master block itself is being released, otherwise
 * via the delayed worker so brief idle gaps don't bounce the clocks.
 * From interrupt context (isr == TRUE) the clock toggling is always
 * deferred to the workqueue.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		mdp_block_power_cnt[block]++;

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		mdp_block_power_cnt[block]--;

		if (mdp_block_power_cnt[block] < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off.  So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			mdp_block_power_cnt[block] = 0;
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (mdp_block_power_cnt[i] > 0)
				mdp_all_blocks_off = FALSE;
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			/* send workqueue to turn off mdp power */
			queue_delayed_work(mdp_pipe_ctrl_wq,
					   &mdp_pipe_ctrl_worker,
					   mdp_timer_duration);
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (mdp_block_power_cnt[i] > 0)
				mdp_all_blocks_off = FALSE;
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */
		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run.  queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			if (block == MDP_MASTER_BLOCK) {
				mdp_current_clk_on = FALSE;
				/* turn off MDP clks */
				if (mdp_clk != NULL) {
					clk_disable(mdp_clk);
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			if (mdp_clk != NULL) {
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
528 #ifndef CONFIG_FB_MSM_MDP40
/*
 * MDP interrupt handler (non-MDP4 targets — MDP4 uses mdp4_isr).
 *
 * Reads and clears the raw interrupt status in a loop until no enabled
 * source remains set, then dispatches each source: DMA/PPP completions
 * wake their waiters via completions, histogram-done copies the HW bin
 * registers into the mdp_hist buffers, and underflow sources only bump
 * diagnostic counters.  mdp_is_in_isr tells mdp_pipe_ctrl() to defer
 * clock work to the workqueue while we are in here.
 */
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;

	mdp_is_in_isr = TRUE;
	do {
		mdp_interrupt = inp32(MDP_INTR_STATUS);
		outp32(MDP_INTR_CLEAR, mdp_interrupt);

		/* only sources we have enabled are dispatched */
		mdp_interrupt &= mdp_intr_mask;

		if (mdp_interrupt & TV_ENC_UNDERRUN) {
			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
			mdp_tv_underflow_cnt++;
		}

		if (!mdp_interrupt)
			break;

		/* DMA3 TV-Out Start */
		if (mdp_interrupt & TV_OUT_DMA3_START) {
			/* let's disable TV out interrupt */
			mdp_intr_mask &= ~TV_OUT_DMA3_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma3_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}
#ifndef CONFIG_FB_MSM_MDP22
		if (mdp_interrupt & MDP_HIST_DONE) {
			outp32(MDP_BASE + 0x94018, 0x3);
			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
			/* copy only the channels mdp_do_histogram() asked for */
			if (mdp_hist.r)
				memcpy(mdp_hist.r, MDP_BASE + 0x94100,
						mdp_hist.bin_cnt*4);
			if (mdp_hist.g)
				memcpy(mdp_hist.g, MDP_BASE + 0x94200,
						mdp_hist.bin_cnt*4);
			if (mdp_hist.b)
				memcpy(mdp_hist.b, MDP_BASE + 0x94300,
						mdp_hist.bin_cnt*4);
			complete(&mdp_hist_comp);
		}

		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW)
			mdp_lcdc_underflow_cnt++;

		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			/* let's disable LCDC interrupt */
			mdp_intr_mask &= ~LCDC_FRAME_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma2_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}

		/* DMA_S (secondary display) complete */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
#endif

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_P_DONE) {
			struct timeval now;
			ktime_t now_k;

			now_k = ktime_get_real();
			mdp_dma2_last_update_time.tv.sec =
			    now_k.tv.sec - mdp_dma2_last_update_time.tv.sec;
			mdp_dma2_last_update_time.tv.nsec =
			    now_k.tv.nsec - mdp_dma2_last_update_time.tv.nsec;

			if (mdp_debug[MDP_DMA2_BLOCK]) {
				jiffies_to_timeval(jiffies, &now);
				mdp_dma2_timeval.tv_usec =
				    now.tv_usec - mdp_dma2_timeval.tv_usec;
			}

			dma = &dma2_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
		/* PPP Complete */
		if (mdp_interrupt & MDP_PPP_DONE) {
#ifdef CONFIG_MDP_PPP_ASYNC_OP
			mdp_ppp_djob_done();
#else
			mdp_pipe_ctrl(MDP_PPP_BLOCK,
				      MDP_BLOCK_POWER_OFF, TRUE);
			if (mdp_ppp_waiting) {
				mdp_ppp_waiting = FALSE;
				complete(&mdp_ppp_comp);
			}
#endif
		}
	} while (1);

	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}
646 #endif
/*
 * One-time driver state initialization, run from module init before the
 * platform driver registers: zero the per-block debug flags and power
 * counters, create the DMA/vsync/pipe-ctrl workqueues, and initialize
 * the completions and semaphores for each DMA channel.  Also populates
 * the debugfs tuning knobs when MSM_FB_ENABLE_DBGFS is set.
 */
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);
#ifdef CONFIG_MDP_PPP_ASYNC_OP
	mdp_ppp_dq_init();
#endif

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	init_MUTEX(&mdp_ppp_mutex);
	init_MUTEX(&mdp_pipe_ctrl_mutex);

	dma2_data.busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_MUTEX(&dma2_data.mutex);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	init_MUTEX(&dma3_data.mutex);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	init_MUTEX(&dma_s_data.mutex);

	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	/*
	 * NOTE(review): unlike dma2/dma3/dma_s, dma_e_data.mutex is never
	 * initialized here — confirm whether any dma_e code path takes it.
	 */

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_block_power_cnt[i] = 0;
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_lcdc_pclk_clk_rate",
					(u32 *) &mdp_lcdc_pclk_clk_rate);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_lcdc_pad_pclk_clk_rate",
					(u32 *) &mdp_lcdc_pad_pclk_clk_rate);
#endif
			}
		}
	}
#endif
}
746 static int mdp_probe(struct platform_device *pdev);
747 static int mdp_remove(struct platform_device *pdev);
/*
 * Platform driver glue for the "mdp" platform device.  The legacy
 * suspend hook is wired up only when the early-suspend framework is not
 * handling power transitions (see mdp_register_driver()).
 */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
	},
};
/*
 * Panel-off callback installed on the chained panel data: power down
 * the next device in the chain, then stop the HW vsync clock when that
 * feature is compiled in.
 */
static int mdp_off(struct platform_device *pdev)
{
#ifdef MDP_HW_VSYNC
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
#endif
	int rc;

	rc = panel_next_off(pdev);

#ifdef MDP_HW_VSYNC
	mdp_hw_vsync_clk_disable(mfd);
#endif

	return rc;
}
/*
 * Panel-on callback installed on the chained panel data: start the HW
 * vsync clock (when compiled in), then power up the next device in the
 * chain.
 */
static int mdp_on(struct platform_device *pdev)
{
#ifdef MDP_HW_VSYNC
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

	mdp_hw_vsync_clk_enable(mfd);
#endif

	return panel_next_on(pdev);
}
800 static int mdp_irq_clk_setup(void)
802 int ret;
804 #ifdef CONFIG_FB_MSM_MDP40
805 ret = request_irq(INT_MDP, mdp4_isr, IRQF_DISABLED, "MDP", 0);
806 #else
807 ret = request_irq(INT_MDP, mdp_isr, IRQF_DISABLED, "MDP", 0);
808 #endif
809 if (ret) {
810 printk(KERN_ERR "mdp request_irq() failed!\n");
811 return ret;
813 disable_irq(INT_MDP);
815 mdp_clk = clk_get(NULL, "mdp_clk");
817 if (IS_ERR(mdp_clk)) {
818 ret = PTR_ERR(mdp_clk);
819 printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
820 free_irq(INT_MDP, 0);
821 return ret;
824 mdp_pclk = clk_get(NULL, "mdp_pclk");
825 if (IS_ERR(mdp_pclk))
826 mdp_pclk = NULL;
829 #ifdef CONFIG_FB_MSM_MDP40
831 * mdp_clk should greater than mdp_pclk always
833 clk_set_rate(mdp_clk, 122880000); /* 122.88 Mhz */
834 printk(KERN_INFO "mdp_clk: mdp_clk=%d mdp_pclk=%d\n",
835 (int)clk_get_rate(mdp_clk), (int)clk_get_rate(mdp_pclk));
836 #endif
838 return 0;
841 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
842 static int pdev_list_cnt;
843 static int mdp_resource_initialized;
844 static struct msm_panel_common_pdata *mdp_pdata;
/*
 * Probe handles two kinds of platform devices:
 *
 *  1. pdev->id == 0 with resources: the MDP core itself.  Map the
 *     register window, set up IRQ and clocks, run one-time HW init and
 *     mark the resource stage done.
 *  2. Panel devices (probed afterwards): allocate a child "msm_fb"
 *     platform device, chain the panel platform data (on/off callbacks,
 *     ->next), and select the DMA engine, update function and optional
 *     LUT/histogram/cursor hooks per panel type before registering it.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif

	/* stage 1: the MDP core device carries the register resource */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size = resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_INFO("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		printk("irq clk setup\n");
		rc = mdp_irq_clk_setup();
		printk("irq clk setup done\n");
		if (rc)
			return rc;

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	/* stage 2: panel devices must come after the core device */
	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		/* SW-refreshed panels: updates are driven by workers */
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#ifdef CONFIG_FB_MSM_OVERLAY
			mfd->dma_fnc = mdp4_mddi_overlay;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

	case HDMI_PANEL:
	case LCDC_PANEL:
		/* HW-refreshed panels: LCDC engine scans out continuously */
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		/* DMA_P done is not needed for continuously refreshed panels */
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
		break;

	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}

	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pdev_list[pdev_list_cnt++] = pdev;
	return 0;

mdp_probe_err:
	platform_device_put(msm_fb_dev);
	return rc;
}
/*
 * Common suspend path: stop the delayed power-off worker, drain its
 * workqueue in case cancellation failed, wait for any in-flight PPP
 * operation to drop its power reference, then force the master
 * power-off so the clocks are gated.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker can't be cancelled, wait for it to finish */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/*
	 * let's wait for PPP completion
	 * NOTE(review): unbounded busy-wait with no cpu_relax()/timeout —
	 * relies on the ISR dropping the count; confirm this cannot spin
	 * forever if the hardware wedges.
	 */
	while (mdp_block_power_cnt[MDP_PPP_BLOCK] > 0) ;

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
1058 #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
/* Legacy PM suspend hook — used only when early-suspend is not available. */
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	mdp_suspend_sub();
	return 0;
}
1064 #endif
1066 #ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend hook: same shutdown path as the legacy suspend handler. */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
}
1071 #endif
/* Unmap the MDP register window that mdp_probe() ioremap'ed. */
static int mdp_remove(struct platform_device *pdev)
{
	iounmap(msm_mdp_base);
	return 0;
}
/*
 * Register the platform driver; when early-suspend is available, hook
 * mdp_early_suspend just before the framebuffer-disable level so MDP
 * powers down ahead of the display.
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
/*
 * Module entry point: set up driver-wide state, register the platform
 * driver, and (MDP4 + debugfs) expose the MDP4 debug files.
 */
static int __init mdp_driver_init(void)
{
	int ret;

	mdp_drv_init();

	ret = mdp_register_driver();
	if (ret) {
		printk(KERN_ERR "mdp_register_driver() failed!\n");
		return ret;
	}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDP40)
	mdp4_debugfs_init();
#endif

	return 0;
}
1110 module_init(mdp_driver_init);