1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2008 by Akio Idehara, Andrew Mahone
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
23 * Implementation of area average and linear row and vertical scalers, and
24 * nearest-neighbor grey scaler (C) 2008 Andrew Mahone
26 * All files in this archive are subject to the GNU General Public License.
27 * See the file COPYING in the source tree root for full license agreement.
29 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
30 * KIND, either express or implied.
32 ****************************************************************************/
45 #ifdef HAVE_REMOTE_LCD
46 #include "lcd-remote.h"
48 #ifdef ROCKBOX_DEBUG_SCALERS
49 #define SDEBUGF DEBUGF
62 #include <jpeg_load.h>
64 #if CONFIG_CPU == SH7034
65 /* 16*16->32 bit multiplication is a single instruction on the SH1 */
66 #define MULUQ(a, b) ((uint32_t) (((uint16_t) (a)) * ((uint16_t) (b))))
67 #define MULQ(a, b) ((int32_t) (((int16_t) (a)) * ((int16_t) (b))))
69 #define MULUQ(a, b) ((a) * (b))
70 #define MULQ(a, b) ((a) * (b))
74 #define CHANNEL_BYTES (sizeof(struct uint32_argb)/sizeof(uint32_t))
76 #define CHANNEL_BYTES (sizeof(uint32_t)/sizeof(uint32_t)) /* packed */
79 /* calculate the maximum dimensions which will preserve the aspect ratio of
80 src while fitting in the constraints passed in dst, and store the result in
81 dst, returning 0 if rounding occurred and 1 if not.
83 int recalc_dimension(struct dim
*dst
, struct dim
*src
)
85 /* This only looks backwards. The input image size is being pre-scaled by
86 * the inverse of the pixel aspect ratio, so that once the size it scaled
87 * to meet the output constraints, the scaled image will have appropriate
90 int sw
= src
->width
* LCD_PIXEL_ASPECT_HEIGHT
;
91 int sh
= src
->height
* LCD_PIXEL_ASPECT_WIDTH
;
94 dst
->width
= LCD_WIDTH
;
96 dst
->height
= LCD_HEIGHT
;
98 if (dst
->width
> sw
|| dst
->height
> sh
)
103 if (sw
== dst
->width
&& sh
== dst
->height
)
106 tmp
= (sw
* dst
->height
+ (sh
>> 1)) / sh
;
107 if (tmp
> dst
->width
)
108 dst
->height
= (sh
* dst
->width
+ (sw
>> 1)) / sw
;
111 return src
->width
== dst
->width
&& src
->height
== dst
->height
;
114 /* All of these scalers use variations of Bresenham's algorithm to convert from
115 their input to output coordinates. The error value is shifted from the
116 "classic" version such that it is a useful input to the scaling calculation.
119 #ifdef HAVE_LCD_COLOR
/* Dither and pack one channel of RGB565; R and B (5 bits) share a packing
 * macro, G (6 bits) has its own.  'v'/'g' is an 8-bit channel value and
 * 'delta' is the dither offset for this pixel.  Arguments are fully
 * parenthesized to avoid operator-precedence surprises when callers pass
 * expressions (CERT PRE01-C); note each argument is still evaluated more
 * than once, so arguments must be side-effect free. */
#define PACKRB(v, delta) ((31 * (v) + ((v) >> 3) + (delta)) >> 8)
#define PACKG(g, delta)  ((63 * (g) + ((g) >> 2) + (delta)) >> 8)
125 /* read new img_part unconditionally, return false on failure */
126 #define FILL_BUF_INIT(img_part, store_part, args) { \
127 img_part = store_part(args); \
128 if (img_part == NULL) \
132 /* read new img_part if current one is empty, return false on failure */
133 #define FILL_BUF(img_part, store_part, args) { \
134 if (img_part->len == 0) \
135 img_part = store_part(args); \
136 if (img_part == NULL) \
140 #if defined(CPU_COLDFIRE)
141 #define MAC(op1, op2, num) \
143 "mac.l %0, %1, %%acc" #num \
145 : "%d" (op1), "d" (op2)\
147 #define MAC_OUT(dest, num) \
149 "movclr.l %%acc" #num ", %0" \
152 #elif defined(CPU_SH)
/* Calculate the 32-bit product of signed 16-bit op1 and op2.
 * (The original comment said "unsigned" -- it was swapped with the one on
 * mul_u16_u16 below.)  Writing the multiply on 16-bit operands lets the
 * compiler select the SH-1's single-instruction 16x16->32 multiply. */
static inline int32_t mul_s16_s16(int16_t op1, int16_t op2)
{
    return (int32_t)(op1 * op2);
}
/* Calculate the 32-bit product of unsigned 16-bit op1 and op2.
 * (The original comment said "signed" -- it was swapped with the one on
 * mul_s16_s16 above.)
 * NOTE(review): op1 * op2 promotes both operands to (signed) int, so a
 * product above INT_MAX would be signed overflow (UB).  The scaler's
 * operands appear to stay well below that, but confirm before reusing
 * this helper with larger values. */
static inline uint32_t mul_u16_u16(uint16_t op1, uint16_t op2)
{
    return (uint32_t)(op1 * op2);
}
166 /* horizontal area average scaler */
167 static bool scale_h_area(void *out_line_ptr
,
168 struct scaler_context
*ctx
, bool accum
)
170 SDEBUGF("scale_h_area\n");
171 unsigned int ix
, ox
, oxe
, mul
;
172 #if defined(CPU_SH) || defined (TEST_SH_MATH)
173 const uint32_t h_i_val
= ctx
->src
->width
,
174 h_o_val
= ctx
->bm
->width
;
176 const uint32_t h_i_val
= ctx
->h_i_val
,
177 h_o_val
= ctx
->h_o_val
;
179 #ifdef HAVE_LCD_COLOR
180 struct uint32_argb rgbvalacc
= { 0, 0, 0, 0 },
181 rgbvaltmp
= { 0, 0, 0, 0 },
182 *out_line
= (struct uint32_argb
*)out_line_ptr
;
184 uint32_t acc
= 0, tmp
= 0, *out_line
= (uint32_t*)out_line_ptr
;
186 struct img_part
*part
;
187 FILL_BUF_INIT(part
,ctx
->store_part
,ctx
->args
);
191 /* give other tasks a chance to run */
193 for (ix
= 0; ix
< (unsigned int)ctx
->src
->width
; ix
++)
196 /* end of current area has been reached */
197 /* fill buffer if needed */
198 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
199 #ifdef HAVE_LCD_COLOR
202 /* "reset" error, which now represents partial coverage of next
203 pixel by the next area
207 #if defined(CPU_COLDFIRE)
208 /* Coldfire EMAC math */
209 /* add saved partial pixel from start of area */
210 MAC(rgbvalacc
.r
, h_o_val
, 0);
211 MAC(rgbvalacc
.g
, h_o_val
, 1);
212 MAC(rgbvalacc
.b
, h_o_val
, 2);
213 MAC(rgbvalacc
.a
, h_o_val
, 3);
214 MAC(rgbvaltmp
.r
, mul
, 0);
215 MAC(rgbvaltmp
.g
, mul
, 1);
216 MAC(rgbvaltmp
.b
, mul
, 2);
217 MAC(rgbvaltmp
.a
, mul
, 3);
218 /* get new pixel , then add its partial coverage to this area */
220 rgbvaltmp
.r
= part
->buf
->red
;
221 rgbvaltmp
.g
= part
->buf
->green
;
222 rgbvaltmp
.b
= part
->buf
->blue
;
223 rgbvaltmp
.a
= part
->buf
->alpha
;
224 MAC(rgbvaltmp
.r
, mul
, 0);
225 MAC(rgbvaltmp
.g
, mul
, 1);
226 MAC(rgbvaltmp
.b
, mul
, 2);
227 MAC(rgbvaltmp
.a
, mul
, 3);
228 MAC_OUT(rgbvalacc
.r
, 0);
229 MAC_OUT(rgbvalacc
.g
, 1);
230 MAC_OUT(rgbvalacc
.b
, 2);
231 MAC_OUT(rgbvalacc
.a
, 3);
234 /* add saved partial pixel from start of area */
235 rgbvalacc
.r
= rgbvalacc
.r
* h_o_val
+ rgbvaltmp
.r
* mul
;
236 rgbvalacc
.g
= rgbvalacc
.g
* h_o_val
+ rgbvaltmp
.g
* mul
;
237 rgbvalacc
.b
= rgbvalacc
.b
* h_o_val
+ rgbvaltmp
.b
* mul
;
238 rgbvalacc
.a
= rgbvalacc
.a
* h_o_val
+ rgbvaltmp
.a
* mul
;
240 /* get new pixel , then add its partial coverage to this area */
241 rgbvaltmp
.r
= part
->buf
->red
;
242 rgbvaltmp
.g
= part
->buf
->green
;
243 rgbvaltmp
.b
= part
->buf
->blue
;
244 rgbvaltmp
.a
= part
->buf
->alpha
;
246 rgbvalacc
.r
+= rgbvaltmp
.r
* mul
;
247 rgbvalacc
.g
+= rgbvaltmp
.g
* mul
;
248 rgbvalacc
.b
+= rgbvaltmp
.b
* mul
;
249 rgbvalacc
.a
+= rgbvaltmp
.a
* mul
;
251 rgbvalacc
.r
= (rgbvalacc
.r
+ (1 << 21)) >> 22;
252 rgbvalacc
.g
= (rgbvalacc
.g
+ (1 << 21)) >> 22;
253 rgbvalacc
.b
= (rgbvalacc
.b
+ (1 << 21)) >> 22;
254 rgbvalacc
.a
= (rgbvalacc
.a
+ (1 << 21)) >> 22;
255 /* store or accumulate to output row */
258 rgbvalacc
.r
+= out_line
[ox
].r
;
259 rgbvalacc
.g
+= out_line
[ox
].g
;
260 rgbvalacc
.b
+= out_line
[ox
].b
;
261 rgbvalacc
.a
+= out_line
[ox
].a
;
263 out_line
[ox
].r
= rgbvalacc
.r
;
264 out_line
[ox
].g
= rgbvalacc
.g
;
265 out_line
[ox
].b
= rgbvalacc
.b
;
266 out_line
[ox
].a
= rgbvalacc
.a
;
267 /* reset accumulator */
276 /* add pixel value to accumulator */
277 rgbvalacc
.r
+= part
->buf
->red
;
278 rgbvalacc
.g
+= part
->buf
->green
;
279 rgbvalacc
.b
+= part
->buf
->blue
;
280 rgbvalacc
.a
+= part
->buf
->alpha
;
285 /* "reset" error, which now represents partial coverage of next
286 pixel by the next area
289 #if defined(CPU_COLDFIRE)
290 /* Coldfire EMAC math */
291 /* add saved partial pixel from start of area */
292 MAC(acc
, h_o_val
, 0);
294 /* get new pixel , then add its partial coverage to this area */
299 #elif defined(CPU_SH)
300 /* SH-1 16x16->32 math */
301 /* add saved partial pixel from start of area */
302 acc
= mul_u16_u16(acc
, h_o_val
) + mul_u16_u16(tmp
, mul
);
304 /* get new pixel , then add its partial coverage to this area */
307 acc
+= mul_u16_u16(tmp
, mul
);
310 /* add saved partial pixel from start of area */
311 acc
= (acc
* h_o_val
) + (tmp
* mul
);
313 /* get new pixel , then add its partial coverage to this area */
318 #if !(defined(CPU_SH) || defined(TEST_SH_MATH))
319 /* round, divide, and either store or accumulate to output row */
320 acc
= (acc
+ (1 << 21)) >> 22;
327 /* reset accumulator */
333 /* add pixel value to accumulator */
343 /* vertical area average scaler */
344 static inline bool scale_v_area(struct rowset
*rset
, struct scaler_context
*ctx
)
346 uint32_t mul
, oy
, iy
, oye
;
347 #if defined(CPU_SH) || defined (TEST_SH_MATH)
348 const uint32_t v_i_val
= ctx
->src
->height
,
349 v_o_val
= ctx
->bm
->height
;
351 const uint32_t v_i_val
= ctx
->v_i_val
,
352 v_o_val
= ctx
->v_o_val
;
355 /* Set up rounding and scale factors */
359 uint32_t *rowacc
= (uint32_t *) ctx
->buf
,
360 *rowtmp
= rowacc
+ ctx
->bm
->width
* CHANNEL_BYTES
,
361 *rowacc_px
, *rowtmp_px
;
362 memset((void *)ctx
->buf
, 0, ctx
->bm
->width
* 2 * sizeof(uint32_t)*CHANNEL_BYTES
);
363 SDEBUGF("scale_v_area\n");
364 /* zero the accumulator and temp rows */
365 for (iy
= 0; iy
< (unsigned int)ctx
->src
->height
; iy
++)
368 /* end of current area has been reached */
371 /* "reset" error, which now represents partial coverage of the next
375 /* add stored partial row to accumulator */
376 for(rowacc_px
= rowacc
, rowtmp_px
= rowtmp
; rowacc_px
!= rowtmp
;
377 rowacc_px
++, rowtmp_px
++)
378 *rowacc_px
= *rowacc_px
* v_o_val
+ *rowtmp_px
* mul
;
379 /* store new scaled row in temp row */
380 if(!ctx
->h_scaler(rowtmp
, ctx
, false))
382 /* add partial coverage by new row to this area, then round and
386 for(rowacc_px
= rowacc
, rowtmp_px
= rowtmp
; rowacc_px
!= rowtmp
;
387 rowacc_px
++, rowtmp_px
++)
388 *rowacc_px
+= mul
* *rowtmp_px
;
389 ctx
->output_row(oy
, (void*)rowacc
, ctx
);
390 /* clear accumulator row, store partial coverage for next row */
391 memset((void *)rowacc
, 0, ctx
->bm
->width
* sizeof(uint32_t) * CHANNEL_BYTES
);
396 /* accumulate new scaled row to rowacc */
397 if (!ctx
->h_scaler(rowacc
, ctx
, true))
405 /* horizontal linear scaler */
406 static bool scale_h_linear(void *out_line_ptr
, struct scaler_context
*ctx
,
409 unsigned int ix
, ox
, ixe
;
410 #if defined(CPU_SH) || defined (TEST_SH_MATH)
411 const uint32_t h_i_val
= ctx
->src
->width
- 1,
412 h_o_val
= ctx
->bm
->width
- 1;
414 const uint32_t h_i_val
= ctx
->h_i_val
,
415 h_o_val
= ctx
->h_o_val
;
417 /* type x = x is an ugly hack for hiding an unitialized data warning. The
418 values are conditionally initialized before use, but other values are
419 set such that this will occur before these are used.
421 #ifdef HAVE_LCD_COLOR
422 struct uint32_argb rgbval
=rgbval
, rgbinc
=rgbinc
,
423 *out_line
= (struct uint32_argb
*)out_line_ptr
;
425 uint32_t val
=val
, inc
=inc
, *out_line
= (uint32_t*)out_line_ptr
;
427 struct img_part
*part
;
428 SDEBUGF("scale_h_linear\n");
429 FILL_BUF_INIT(part
,ctx
->store_part
,ctx
->args
);
431 /* The error is set so that values are initialized on the first pass. */
433 /* give other tasks a chance to run */
435 for (ox
= 0; ox
< (uint32_t)ctx
->bm
->width
; ox
++)
437 #ifdef HAVE_LCD_COLOR
440 /* Store the new "current" pixel value in rgbval, and the color
441 step value in rgbinc.
444 rgbinc
.r
= -(part
->buf
->red
);
445 rgbinc
.g
= -(part
->buf
->green
);
446 rgbinc
.b
= -(part
->buf
->blue
);
447 rgbinc
.a
= -(part
->buf
->alpha
);
448 #if defined(CPU_COLDFIRE)
449 /* Coldfire EMAC math */
450 MAC(part
->buf
->red
, h_o_val
, 0);
451 MAC(part
->buf
->green
, h_o_val
, 1);
452 MAC(part
->buf
->blue
, h_o_val
, 2);
453 MAC(part
->buf
->alpha
, h_o_val
, 3);
456 rgbval
.r
= (part
->buf
->red
) * h_o_val
;
457 rgbval
.g
= (part
->buf
->green
) * h_o_val
;
458 rgbval
.b
= (part
->buf
->blue
) * h_o_val
;
459 rgbval
.a
= (part
->buf
->alpha
) * h_o_val
;
462 /* If this wasn't the last pixel, add the next one to rgbinc. */
463 if (LIKELY(ix
< (uint32_t)ctx
->src
->width
)) {
466 /* Fetch new pixels if needed */
467 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
468 rgbinc
.r
+= part
->buf
->red
;
469 rgbinc
.g
+= part
->buf
->green
;
470 rgbinc
.b
+= part
->buf
->blue
;
471 rgbinc
.a
+= part
->buf
->alpha
;
472 /* Add a partial step to rgbval, if this pixel isn't precisely
473 aligned with the new source pixel
475 #if defined(CPU_COLDFIRE)
476 /* Coldfire EMAC math */
477 MAC(rgbinc
.r
, ixe
, 0);
478 MAC(rgbinc
.g
, ixe
, 1);
479 MAC(rgbinc
.b
, ixe
, 2);
480 MAC(rgbinc
.a
, ixe
, 3);
483 rgbval
.r
+= rgbinc
.r
* ixe
;
484 rgbval
.g
+= rgbinc
.g
* ixe
;
485 rgbval
.b
+= rgbinc
.b
* ixe
;
486 rgbval
.a
+= rgbinc
.a
* ixe
;
489 #if defined(CPU_COLDFIRE)
490 /* get final EMAC result out of ACC registers */
491 MAC_OUT(rgbval
.r
, 0);
492 MAC_OUT(rgbval
.g
, 1);
493 MAC_OUT(rgbval
.b
, 2);
494 MAC_OUT(rgbval
.a
, 3);
496 /* Now multiply the color increment to its proper value */
502 rgbval
.r
+= rgbinc
.r
;
503 rgbval
.g
+= rgbinc
.g
;
504 rgbval
.b
+= rgbinc
.b
;
505 rgbval
.a
+= rgbinc
.a
;
507 /* round and scale values, and accumulate or store to output */
510 out_line
[ox
].r
+= (rgbval
.r
+ (1 << 21)) >> 22;
511 out_line
[ox
].g
+= (rgbval
.g
+ (1 << 21)) >> 22;
512 out_line
[ox
].b
+= (rgbval
.b
+ (1 << 21)) >> 22;
513 out_line
[ox
].a
+= (rgbval
.a
+ (1 << 21)) >> 22;
515 out_line
[ox
].r
= (rgbval
.r
+ (1 << 21)) >> 22;
516 out_line
[ox
].g
= (rgbval
.g
+ (1 << 21)) >> 22;
517 out_line
[ox
].b
= (rgbval
.b
+ (1 << 21)) >> 22;
518 out_line
[ox
].a
= (rgbval
.a
+ (1 << 21)) >> 22;
523 /* Store the new "current" pixel value in rgbval, and the color
524 step value in rgbinc.
529 #if defined(CPU_COLDFIRE)
530 /* Coldfire EMAC math */
531 MAC(val
, h_o_val
, 0);
532 #elif defined(CPU_SH)
533 /* SH-1 16x16->32 math */
534 val
= mul_u16_u16(val
, h_o_val
);
540 /* If this wasn't the last pixel, add the next one to rgbinc. */
541 if (LIKELY(ix
< (uint32_t)ctx
->src
->width
)) {
544 /* Fetch new pixels if needed */
545 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
547 /* Add a partial step to rgbval, if this pixel isn't precisely
548 aligned with the new source pixel
550 #if defined(CPU_COLDFIRE)
551 /* Coldfire EMAC math */
553 #elif defined(CPU_SH)
554 /* SH-1 16x16->32 math */
555 val
+= mul_s16_s16(inc
, ixe
);
561 #if defined(CPU_COLDFIRE)
562 /* get final EMAC result out of ACC register */
565 /* Now multiply the color increment to its proper value */
567 /* SH-1 16x16->32 math */
568 inc
= mul_s16_s16(inc
, h_i_val
);
575 #if !(defined(CPU_SH) || defined(TEST_SH_MATH))
576 /* round and scale values, and accumulate or store to output */
579 out_line
[ox
] += (val
+ (1 << 21)) >> 22;
581 out_line
[ox
] = (val
+ (1 << 21)) >> 22;
584 /* round and scale values, and accumulate or store to output */
598 /* vertical linear scaler */
599 static inline bool scale_v_linear(struct rowset
*rset
,
600 struct scaler_context
*ctx
)
604 #if defined(CPU_SH) || defined (TEST_SH_MATH)
605 const uint32_t v_i_val
= ctx
->src
->height
- 1,
606 v_o_val
= ctx
->bm
->height
- 1;
608 const uint32_t v_i_val
= ctx
->v_i_val
,
609 v_o_val
= ctx
->v_o_val
;
611 /* Set up our buffers, to store the increment and current value for each
612 column, and one temp buffer used to read in new rows.
614 uint32_t *rowinc
= (uint32_t *)(ctx
->buf
),
615 *rowval
= rowinc
+ ctx
->bm
->width
* CHANNEL_BYTES
,
616 *rowtmp
= rowval
+ ctx
->bm
->width
* CHANNEL_BYTES
,
617 *rowinc_px
, *rowval_px
, *rowtmp_px
;
619 SDEBUGF("scale_v_linear\n");
622 /* get first scaled row in rowtmp */
623 if(!ctx
->h_scaler((void*)rowtmp
, ctx
, false))
625 for (oy
= rset
->rowstart
; oy
!= rset
->rowstop
; oy
+= rset
->rowstep
)
631 for(rowinc_px
= rowinc
, rowtmp_px
= rowtmp
, rowval_px
= rowval
;
632 rowinc_px
< rowval
; rowinc_px
++, rowtmp_px
++, rowval_px
++)
634 *rowinc_px
= -*rowtmp_px
;
635 *rowval_px
= *rowtmp_px
* v_o_val
;
637 if (iy
< (uint32_t)ctx
->src
->height
)
639 if (!ctx
->h_scaler((void*)rowtmp
, ctx
, false))
641 for(rowinc_px
= rowinc
, rowtmp_px
= rowtmp
, rowval_px
= rowval
;
642 rowinc_px
< rowval
; rowinc_px
++, rowtmp_px
++, rowval_px
++)
644 *rowinc_px
+= *rowtmp_px
;
645 *rowval_px
+= *rowinc_px
* iye
;
646 *rowinc_px
*= v_i_val
;
650 for(rowinc_px
= rowinc
, rowval_px
= rowval
; rowinc_px
< rowval
;
651 rowinc_px
++, rowval_px
++)
652 *rowval_px
+= *rowinc_px
;
653 ctx
->output_row(oy
, (void*)rowval
, ctx
);
658 #endif /* HAVE_UPSCALER */
660 #if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
661 static void output_row_32_native_fromyuv(uint32_t row
, void * row_in
,
662 struct scaler_context
*ctx
)
664 #if defined(LCD_STRIDEFORMAT) && LCD_STRIDEFORMAT == VERTICAL_STRIDE
665 #define DEST_STEP (ctx->bm->height)
668 #define DEST_STEP (1)
669 #define Y_STEP (BM_WIDTH(ctx->bm->width,FORMAT_NATIVE,0))
673 uint8_t dy
= DITHERY(row
);
674 struct uint32_argb
*qp
= (struct uint32_argb
*)row_in
;
675 SDEBUGF("output_row: y: %lu in: %p\n",row
, row_in
);
676 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ Y_STEP
* row
;
678 unsigned r
, g
, b
, y
, u
, v
;
680 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
682 delta
= DITHERXDY(col
,dy
);
683 y
= SC_OUT(qp
->b
, ctx
);
684 u
= SC_OUT(qp
->g
, ctx
);
685 v
= SC_OUT(qp
->r
, ctx
);
687 yuv_to_rgb(y
, u
, v
, &r
, &g
, &b
);
688 r
= (31 * r
+ (r
>> 3) + delta
) >> 8;
689 g
= (63 * g
+ (g
>> 2) + delta
) >> 8;
690 b
= (31 * b
+ (b
>> 3) + delta
) >> 8;
691 *dest
= LCD_RGBPACK_LCD(r
, g
, b
);
697 #if !defined(PLUGIN) || LCD_DEPTH > 1
698 static void output_row_32_native(uint32_t row
, void * row_in
,
699 struct scaler_context
*ctx
)
702 int fb_width
= BM_WIDTH(ctx
->bm
->width
,FORMAT_NATIVE
,0);
703 uint8_t dy
= DITHERY(row
);
704 #ifdef HAVE_LCD_COLOR
705 struct uint32_argb
*qp
= (struct uint32_argb
*)row_in
;
707 uint32_t *qp
= (uint32_t*)row_in
;
709 SDEBUGF("output_row: y: %lu in: %p\n",row
, row_in
);
711 #if LCD_PIXELFORMAT == HORIZONTAL_PACKING
712 /* greyscale iPods */
713 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
* row
;
719 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
721 delta
= DITHERXDY(col
,dy
);
722 bright
= SC_OUT(*qp
++, ctx
);
723 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
724 data
|= (~bright
& 3) << shift
;
734 #elif LCD_PIXELFORMAT == VERTICAL_PACKING
736 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
*
738 int shift
= 2 * (row
& 3);
742 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
744 delta
= DITHERXDY(col
,dy
);
745 bright
= SC_OUT(*qp
++, ctx
);
746 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
747 *dest
++ |= (~bright
& 3) << shift
;
749 #elif LCD_PIXELFORMAT == VERTICAL_INTERLEAVED
751 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
*
757 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
759 delta
= DITHERXDY(col
,dy
);
760 bright
= SC_OUT(*qp
++, ctx
);
761 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
762 *dest
++ |= vi_pattern
[bright
] << shift
;
764 #endif /* LCD_PIXELFORMAT */
765 #elif LCD_DEPTH == 16
766 /* iriver h300, colour iPods, X5 */
768 fb_data
*dest
= STRIDE_MAIN((fb_data
*)ctx
->bm
->data
+ fb_width
* row
,
769 (fb_data
*)ctx
->bm
->data
+ row
);
772 struct uint32_argb q0
;
773 /* setup alpha channel buffer */
774 unsigned char *bm_alpha
= NULL
;
775 if (ctx
->bm
->alpha_offset
> 0)
776 bm_alpha
= ctx
->bm
->data
+ ctx
->bm
->alpha_offset
;
778 bm_alpha
+= ALIGN_UP(ctx
->bm
->width
, 2)*row
/2;
780 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
782 delta
= DITHERXDY(col
,dy
);
784 r
= SC_OUT(q0
.r
, ctx
);
785 g
= SC_OUT(q0
.g
, ctx
);
786 b
= SC_OUT(q0
.b
, ctx
);
787 r
= (31 * r
+ (r
>> 3) + delta
) >> 8;
788 g
= (63 * g
+ (g
>> 2) + delta
) >> 8;
789 b
= (31 * b
+ (b
>> 3) + delta
) >> 8;
790 *dest
= LCD_RGBPACK_LCD(r
, g
, b
);
791 dest
+= STRIDE_MAIN(1, ctx
->bm
->height
);
793 /* pack alpha channel for 2 pixels into 1 byte */
794 unsigned alpha
= SC_OUT(q0
.a
, ctx
);
796 *bm_alpha
++ |= alpha
&0xf0;
798 *bm_alpha
= alpha
>>4;
801 #endif /* LCD_DEPTH */
805 #if defined(PLUGIN) && LCD_DEPTH > 1
806 unsigned int get_size_native(struct bitmap
*bm
)
808 return BM_SIZE(bm
->width
,bm
->height
,FORMAT_NATIVE
,0);
811 const struct custom_format format_native
= {
812 .output_row_8
= output_row_8_native
,
813 #if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
815 output_row_32_native
,
816 output_row_32_native_fromyuv
819 .output_row_32
= output_row_32_native
,
821 .get_size
= get_size_native
825 int resize_on_load(struct bitmap
*bm
, bool dither
, struct dim
*src
,
826 struct rowset
*rset
, unsigned char *buf
, unsigned int len
,
827 const struct custom_format
*format
,
828 IF_PIX_FMT(int format_index
,)
829 struct img_part
* (*store_part
)(void *args
),
832 const int sw
= src
->width
;
833 const int sh
= src
->height
;
834 const int dw
= bm
->width
;
835 const int dh
= bm
->height
;
837 /* buffer for 1 line + 2 spare lines */
838 #ifdef HAVE_LCD_COLOR
839 unsigned int needed
= sizeof(struct uint32_argb
) * 3 * bm
->width
;
841 unsigned int needed
= sizeof(uint32_t) * 3 * bm
->width
;
843 #if MAX_SC_STACK_ALLOC
844 uint8_t sc_buf
[(needed
<= len
|| needed
> MAX_SC_STACK_ALLOC
) ?
847 ALIGN_BUFFER(buf
, len
, sizeof(uint32_t));
850 #if MAX_SC_STACK_ALLOC
851 if (needed
> MAX_SC_STACK_ALLOC
)
853 DEBUGF("unable to allocate required buffer: %d needed, "
854 "%d available, %d permitted from stack\n",
855 needed
, len
, MAX_SC_STACK_ALLOC
);
858 if (sizeof(sc_buf
) < needed
)
860 DEBUGF("failed to allocate large enough buffer on stack: "
861 "%d needed, only got %d",
862 needed
, MAX_SC_STACK_ALLOC
);
866 DEBUGF("unable to allocate required buffer: %d needed, "
867 "%d available\n", needed
, len
);
872 struct scaler_context ctx
;
873 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
876 ctx
.store_part
= store_part
;
878 #if MAX_SC_STACK_ALLOC
879 ctx
.buf
= needed
> len
? sc_buf
: buf
;
887 #if defined(CPU_SH) || defined (TEST_SH_MATH)
891 #if defined(HAVE_LCD_COLOR) && defined(HAVE_JPEG)
892 ctx
.output_row
= format_index
? output_row_32_native_fromyuv
893 : output_row_32_native
;
895 ctx
.output_row
= output_row_32_native
;
899 #ifdef HAVE_LCD_COLOR
900 ctx
.output_row
= format
->output_row_32
[format_index
];
902 ctx
.output_row
= format
->output_row_32
;
908 ctx
.h_scaler
= scale_h_area
;
909 #if defined(CPU_SH) || defined (TEST_SH_MATH)
912 uint32_t h_div
= (1U << 24) / sw
;
913 ctx
.h_i_val
= sw
* h_div
;
914 ctx
.h_o_val
= dw
* h_div
;
918 ctx
.h_scaler
= scale_h_linear
;
919 #if defined(CPU_SH) || defined (TEST_SH_MATH)
922 uint32_t h_div
= (1U << 24) / (dw
- 1);
923 ctx
.h_i_val
= (sw
- 1) * h_div
;
924 ctx
.h_o_val
= (dw
- 1) * h_div
;
929 unsigned old_macsr
= coldfire_get_macsr();
930 coldfire_set_macsr(EMAC_UNSIGNED
);
936 #if defined(CPU_SH) || defined (TEST_SH_MATH)
938 ctx
.recip
= ((uint32_t)(-div
)) / div
+ 1;
940 uint32_t v_div
= (1U << 22) / sh
;
941 ctx
.v_i_val
= sh
* v_div
;
942 ctx
.v_o_val
= dh
* v_div
;
944 ret
= scale_v_area(rset
, &ctx
);
949 #if defined(CPU_SH) || defined (TEST_SH_MATH)
951 ctx
.recip
= ((uint32_t)(-div
)) / div
+ 1;
953 uint32_t v_div
= (1U << 22) / dh
;
954 ctx
.v_i_val
= (sh
- 1) * v_div
;
955 ctx
.v_o_val
= (dh
- 1) * v_div
;
957 ret
= scale_v_linear(rset
, &ctx
);
961 /* Restore emac status; other modules like tone control filter
962 * calculation may rely on it. */
963 coldfire_set_macsr(old_macsr
);
965 #ifdef HAVE_ADJUSTABLE_CPU_FREQ