1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2008 by Akio Idehara, Andrew Mahone
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
23 * Implementation of area average and linear row and vertical scalers, and
24 * nearest-neighbor grey scaler (C) 2008 Andrew Mahone
26 * All files in this archive are subject to the GNU General Public License.
27 * See the file COPYING in the source tree root for full license agreement.
29 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
30 * KIND, either express or implied.
32 ****************************************************************************/
45 #ifdef HAVE_REMOTE_LCD
46 #include "lcd-remote.h"
48 #ifdef ROCKBOX_DEBUG_SCALERS
49 #define SDEBUGF DEBUGF
62 #include <jpeg_load.h>
#if CONFIG_CPU == SH7034
/* 16*16->32 bit multiplication is a single instruction on the SH1 */
/* Widen one operand to uint32_t before multiplying: two uint16_t values
 * promote to signed int, so a product above INT_MAX (e.g. 0xFFFF * 0xFFFF)
 * would be signed-overflow undefined behavior. The compiler still emits
 * the single 16x16 hardware multiply for this form.
 */
#define MULUQ(a, b) ((uint32_t) ((uint16_t) (a)) * ((uint16_t) (b)))
#define MULQ(a, b) ((int32_t) (((int16_t) (a)) * ((int16_t) (b))))
#else
/* other CPUs have a fast native 32-bit multiply */
#define MULUQ(a, b) ((a) * (b))
#define MULQ(a, b) ((a) * (b))
#endif
73 /* calculate the maximum dimensions which will preserve the aspect ration of
74 src while fitting in the constraints passed in dst, and store result in dst,
75 returning 0 if rounding and 1 if not rounding.
77 int recalc_dimension(struct dim
*dst
, struct dim
*src
)
79 /* This only looks backwards. The input image size is being pre-scaled by
80 * the inverse of the pixel aspect ratio, so that once the size it scaled
81 * to meet the output constraints, the scaled image will have appropriate
84 int sw
= src
->width
* LCD_PIXEL_ASPECT_HEIGHT
;
85 int sh
= src
->height
* LCD_PIXEL_ASPECT_WIDTH
;
88 dst
->width
= LCD_WIDTH
;
90 dst
->height
= LCD_HEIGHT
;
92 if (dst
->width
> sw
|| dst
->height
> sh
)
97 if (sw
== dst
->width
&& sh
== dst
->height
)
100 tmp
= (sw
* dst
->height
+ (sh
>> 1)) / sh
;
101 if (tmp
> dst
->width
)
102 dst
->height
= (sh
* dst
->width
+ (sw
>> 1)) / sw
;
105 return src
->width
== dst
->width
&& src
->height
== dst
->height
;
108 /* All of these scalers use variations of Bresenham's algorithm to convert from
109 their input to output coordinates. The error value is shifted from the
110 "classic" version such that it is a useful input to the scaling calculation.
113 #ifdef HAVE_LCD_COLOR
/* dither + pack one channel of RGB565; R and B share a packing macro.
 * Arguments are fully parenthesized so arbitrary expressions expand safely
 * (e.g. PACKRB(a + b, d) would otherwise mis-associate with '*' and '>>').
 */
#define PACKRB(v, delta) ((31 * (v) + ((v) >> 3) + (delta)) >> 8)
#define PACKG(g, delta)  ((63 * (g) + ((g) >> 2) + (delta)) >> 8)
/* Read a new img_part unconditionally; expands to a 'return false' from the
 * enclosing (bool-returning) scaler on failure. */
#define FILL_BUF_INIT(img_part, store_part, args) { \
    img_part = store_part(args); \
    if (img_part == NULL) \
        return false; \
}

/* Read a new img_part only if the current one is empty; expands to a
 * 'return false' from the enclosing (bool-returning) scaler on failure. */
#define FILL_BUF(img_part, store_part, args) { \
    if (img_part->len == 0) \
        img_part = store_part(args); \
    if (img_part == NULL) \
        return false; \
}
134 #if defined(CPU_COLDFIRE)
135 #define MAC(op1, op2, num) \
137 "mac.l %0, %1, %%acc" #num \
139 : "%d" (op1), "d" (op2)\
141 #define MAC_OUT(dest, num) \
143 "movclr.l %%acc" #num ", %0" \
146 #elif defined(CPU_SH)
147 /* calculate the 32-bit product of unsigned 16-bit op1 and op2 */
148 static inline int32_t mul_s16_s16(int16_t op1
, int16_t op2
)
150 return (int32_t)(op1
* op2
);
153 /* calculate the 32-bit product of signed 16-bit op1 and op2 */
154 static inline uint32_t mul_u16_u16(uint16_t op1
, uint16_t op2
)
156 return (uint32_t)(op1
* op2
);
160 /* horizontal area average scaler */
161 static bool scale_h_area(void *out_line_ptr
,
162 struct scaler_context
*ctx
, bool accum
)
164 SDEBUGF("scale_h_area\n");
165 unsigned int ix
, ox
, oxe
, mul
;
166 #if defined(CPU_SH) || defined (TEST_SH_MATH)
167 const uint32_t h_i_val
= ctx
->src
->width
,
168 h_o_val
= ctx
->bm
->width
;
170 const uint32_t h_i_val
= ctx
->h_i_val
,
171 h_o_val
= ctx
->h_o_val
;
173 #ifdef HAVE_LCD_COLOR
174 struct uint32_rgb rgbvalacc
= { 0, 0, 0 },
175 rgbvaltmp
= { 0, 0, 0 },
176 *out_line
= (struct uint32_rgb
*)out_line_ptr
;
178 uint32_t acc
= 0, tmp
= 0, *out_line
= (uint32_t*)out_line_ptr
;
180 struct img_part
*part
;
181 FILL_BUF_INIT(part
,ctx
->store_part
,ctx
->args
);
185 /* give other tasks a chance to run */
187 for (ix
= 0; ix
< (unsigned int)ctx
->src
->width
; ix
++)
190 /* end of current area has been reached */
191 /* fill buffer if needed */
192 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
193 #ifdef HAVE_LCD_COLOR
196 /* "reset" error, which now represents partial coverage of next
197 pixel by the next area
201 #if defined(CPU_COLDFIRE)
202 /* Coldfire EMAC math */
203 /* add saved partial pixel from start of area */
204 MAC(rgbvalacc
.r
, h_o_val
, 0);
205 MAC(rgbvalacc
.g
, h_o_val
, 1);
206 MAC(rgbvalacc
.b
, h_o_val
, 2);
207 MAC(rgbvaltmp
.r
, mul
, 0);
208 MAC(rgbvaltmp
.g
, mul
, 1);
209 MAC(rgbvaltmp
.b
, mul
, 2);
210 /* get new pixel , then add its partial coverage to this area */
212 rgbvaltmp
.r
= part
->buf
->red
;
213 rgbvaltmp
.g
= part
->buf
->green
;
214 rgbvaltmp
.b
= part
->buf
->blue
;
215 MAC(rgbvaltmp
.r
, mul
, 0);
216 MAC(rgbvaltmp
.g
, mul
, 1);
217 MAC(rgbvaltmp
.b
, mul
, 2);
218 MAC_OUT(rgbvalacc
.r
, 0);
219 MAC_OUT(rgbvalacc
.g
, 1);
220 MAC_OUT(rgbvalacc
.b
, 2);
223 /* add saved partial pixel from start of area */
224 rgbvalacc
.r
= rgbvalacc
.r
* h_o_val
+ rgbvaltmp
.r
* mul
;
225 rgbvalacc
.g
= rgbvalacc
.g
* h_o_val
+ rgbvaltmp
.g
* mul
;
226 rgbvalacc
.b
= rgbvalacc
.b
* h_o_val
+ rgbvaltmp
.b
* mul
;
228 /* get new pixel , then add its partial coverage to this area */
229 rgbvaltmp
.r
= part
->buf
->red
;
230 rgbvaltmp
.g
= part
->buf
->green
;
231 rgbvaltmp
.b
= part
->buf
->blue
;
233 rgbvalacc
.r
+= rgbvaltmp
.r
* mul
;
234 rgbvalacc
.g
+= rgbvaltmp
.g
* mul
;
235 rgbvalacc
.b
+= rgbvaltmp
.b
* mul
;
237 rgbvalacc
.r
= (rgbvalacc
.r
+ (1 << 21)) >> 22;
238 rgbvalacc
.g
= (rgbvalacc
.g
+ (1 << 21)) >> 22;
239 rgbvalacc
.b
= (rgbvalacc
.b
+ (1 << 21)) >> 22;
240 /* store or accumulate to output row */
243 rgbvalacc
.r
+= out_line
[ox
].r
;
244 rgbvalacc
.g
+= out_line
[ox
].g
;
245 rgbvalacc
.b
+= out_line
[ox
].b
;
247 out_line
[ox
].r
= rgbvalacc
.r
;
248 out_line
[ox
].g
= rgbvalacc
.g
;
249 out_line
[ox
].b
= rgbvalacc
.b
;
250 /* reset accumulator */
258 /* add pixel value to accumulator */
259 rgbvalacc
.r
+= part
->buf
->red
;
260 rgbvalacc
.g
+= part
->buf
->green
;
261 rgbvalacc
.b
+= part
->buf
->blue
;
266 /* "reset" error, which now represents partial coverage of next
267 pixel by the next area
270 #if defined(CPU_COLDFIRE)
271 /* Coldfire EMAC math */
272 /* add saved partial pixel from start of area */
273 MAC(acc
, h_o_val
, 0);
275 /* get new pixel , then add its partial coverage to this area */
280 #elif defined(CPU_SH)
281 /* SH-1 16x16->32 math */
282 /* add saved partial pixel from start of area */
283 acc
= mul_u16_u16(acc
, h_o_val
) + mul_u16_u16(tmp
, mul
);
285 /* get new pixel , then add its partial coverage to this area */
288 acc
+= mul_u16_u16(tmp
, mul
);
291 /* add saved partial pixel from start of area */
292 acc
= (acc
* h_o_val
) + (tmp
* mul
);
294 /* get new pixel , then add its partial coverage to this area */
299 #if !(defined(CPU_SH) || defined(TEST_SH_MATH))
300 /* round, divide, and either store or accumulate to output row */
301 acc
= (acc
+ (1 << 21)) >> 22;
308 /* reset accumulator */
314 /* add pixel value to accumulator */
324 /* vertical area average scaler */
325 static inline bool scale_v_area(struct rowset
*rset
, struct scaler_context
*ctx
)
327 uint32_t mul
, oy
, iy
, oye
;
328 #if defined(CPU_SH) || defined (TEST_SH_MATH)
329 const uint32_t v_i_val
= ctx
->src
->height
,
330 v_o_val
= ctx
->bm
->height
;
332 const uint32_t v_i_val
= ctx
->v_i_val
,
333 v_o_val
= ctx
->v_o_val
;
336 /* Set up rounding and scale factors */
340 #ifdef HAVE_LCD_COLOR
341 uint32_t *rowacc
= (uint32_t *) ctx
->buf
,
342 *rowtmp
= rowacc
+ 3 * ctx
->bm
->width
,
343 *rowacc_px
, *rowtmp_px
;
344 memset((void *)ctx
->buf
, 0, ctx
->bm
->width
* 2 * sizeof(struct uint32_rgb
));
346 uint32_t *rowacc
= (uint32_t *) ctx
->buf
,
347 *rowtmp
= rowacc
+ ctx
->bm
->width
,
348 *rowacc_px
, *rowtmp_px
;
349 memset((void *)ctx
->buf
, 0, ctx
->bm
->width
* 2 * sizeof(uint32_t));
351 SDEBUGF("scale_v_area\n");
352 /* zero the accumulator and temp rows */
353 for (iy
= 0; iy
< (unsigned int)ctx
->src
->height
; iy
++)
356 /* end of current area has been reached */
359 /* "reset" error, which now represents partial coverage of the next
363 /* add stored partial row to accumulator */
364 for(rowacc_px
= rowacc
, rowtmp_px
= rowtmp
; rowacc_px
!= rowtmp
;
365 rowacc_px
++, rowtmp_px
++)
366 *rowacc_px
= *rowacc_px
* v_o_val
+ *rowtmp_px
* mul
;
367 /* store new scaled row in temp row */
368 if(!ctx
->h_scaler(rowtmp
, ctx
, false))
370 /* add partial coverage by new row to this area, then round and
374 for(rowacc_px
= rowacc
, rowtmp_px
= rowtmp
; rowacc_px
!= rowtmp
;
375 rowacc_px
++, rowtmp_px
++)
376 *rowacc_px
+= mul
* *rowtmp_px
;
377 ctx
->output_row(oy
, (void*)rowacc
, ctx
);
378 /* clear accumulator row, store partial coverage for next row */
379 #ifdef HAVE_LCD_COLOR
380 memset((void *)rowacc
, 0, ctx
->bm
->width
* sizeof(uint32_t) * 3);
382 memset((void *)rowacc
, 0, ctx
->bm
->width
* sizeof(uint32_t));
388 /* accumulate new scaled row to rowacc */
389 if (!ctx
->h_scaler(rowacc
, ctx
, true))
397 /* horizontal linear scaler */
398 static bool scale_h_linear(void *out_line_ptr
, struct scaler_context
*ctx
,
401 unsigned int ix
, ox
, ixe
;
402 #if defined(CPU_SH) || defined (TEST_SH_MATH)
403 const uint32_t h_i_val
= ctx
->src
->width
- 1,
404 h_o_val
= ctx
->bm
->width
- 1;
406 const uint32_t h_i_val
= ctx
->h_i_val
,
407 h_o_val
= ctx
->h_o_val
;
409 /* type x = x is an ugly hack for hiding an unitialized data warning. The
410 values are conditionally initialized before use, but other values are
411 set such that this will occur before these are used.
413 #ifdef HAVE_LCD_COLOR
414 struct uint32_rgb rgbval
=rgbval
, rgbinc
=rgbinc
,
415 *out_line
= (struct uint32_rgb
*)out_line_ptr
;
417 uint32_t val
=val
, inc
=inc
, *out_line
= (uint32_t*)out_line_ptr
;
419 struct img_part
*part
;
420 SDEBUGF("scale_h_linear\n");
421 FILL_BUF_INIT(part
,ctx
->store_part
,ctx
->args
);
423 /* The error is set so that values are initialized on the first pass. */
425 /* give other tasks a chance to run */
427 for (ox
= 0; ox
< (uint32_t)ctx
->bm
->width
; ox
++)
429 #ifdef HAVE_LCD_COLOR
432 /* Store the new "current" pixel value in rgbval, and the color
433 step value in rgbinc.
436 rgbinc
.r
= -(part
->buf
->red
);
437 rgbinc
.g
= -(part
->buf
->green
);
438 rgbinc
.b
= -(part
->buf
->blue
);
439 #if defined(CPU_COLDFIRE)
440 /* Coldfire EMAC math */
441 MAC(part
->buf
->red
, h_o_val
, 0);
442 MAC(part
->buf
->green
, h_o_val
, 1);
443 MAC(part
->buf
->blue
, h_o_val
, 2);
446 rgbval
.r
= (part
->buf
->red
) * h_o_val
;
447 rgbval
.g
= (part
->buf
->green
) * h_o_val
;
448 rgbval
.b
= (part
->buf
->blue
) * h_o_val
;
451 /* If this wasn't the last pixel, add the next one to rgbinc. */
452 if (LIKELY(ix
< (uint32_t)ctx
->src
->width
)) {
455 /* Fetch new pixels if needed */
456 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
457 rgbinc
.r
+= part
->buf
->red
;
458 rgbinc
.g
+= part
->buf
->green
;
459 rgbinc
.b
+= part
->buf
->blue
;
460 /* Add a partial step to rgbval, in this pixel isn't precisely
461 aligned with the new source pixel
463 #if defined(CPU_COLDFIRE)
464 /* Coldfire EMAC math */
465 MAC(rgbinc
.r
, ixe
, 0);
466 MAC(rgbinc
.g
, ixe
, 1);
467 MAC(rgbinc
.b
, ixe
, 2);
470 rgbval
.r
+= rgbinc
.r
* ixe
;
471 rgbval
.g
+= rgbinc
.g
* ixe
;
472 rgbval
.b
+= rgbinc
.b
* ixe
;
475 #if defined(CPU_COLDFIRE)
476 /* get final EMAC result out of ACC registers */
477 MAC_OUT(rgbval
.r
, 0);
478 MAC_OUT(rgbval
.g
, 1);
479 MAC_OUT(rgbval
.b
, 2);
481 /* Now multiply the color increment to its proper value */
486 rgbval
.r
+= rgbinc
.r
;
487 rgbval
.g
+= rgbinc
.g
;
488 rgbval
.b
+= rgbinc
.b
;
490 /* round and scale values, and accumulate or store to output */
493 out_line
[ox
].r
+= (rgbval
.r
+ (1 << 21)) >> 22;
494 out_line
[ox
].g
+= (rgbval
.g
+ (1 << 21)) >> 22;
495 out_line
[ox
].b
+= (rgbval
.b
+ (1 << 21)) >> 22;
497 out_line
[ox
].r
= (rgbval
.r
+ (1 << 21)) >> 22;
498 out_line
[ox
].g
= (rgbval
.g
+ (1 << 21)) >> 22;
499 out_line
[ox
].b
= (rgbval
.b
+ (1 << 21)) >> 22;
504 /* Store the new "current" pixel value in rgbval, and the color
505 step value in rgbinc.
510 #if defined(CPU_COLDFIRE)
511 /* Coldfire EMAC math */
512 MAC(val
, h_o_val
, 0);
513 #elif defined(CPU_SH)
514 /* SH-1 16x16->32 math */
515 val
= mul_u16_u16(val
, h_o_val
);
521 /* If this wasn't the last pixel, add the next one to rgbinc. */
522 if (LIKELY(ix
< (uint32_t)ctx
->src
->width
)) {
525 /* Fetch new pixels if needed */
526 FILL_BUF(part
,ctx
->store_part
,ctx
->args
);
528 /* Add a partial step to rgbval, in this pixel isn't precisely
529 aligned with the new source pixel
531 #if defined(CPU_COLDFIRE)
532 /* Coldfire EMAC math */
534 #elif defined(CPU_SH)
535 /* SH-1 16x16->32 math */
536 val
+= mul_s16_s16(inc
, ixe
);
542 #if defined(CPU_COLDFIRE)
543 /* get final EMAC result out of ACC register */
546 /* Now multiply the color increment to its proper value */
548 /* SH-1 16x16->32 math */
549 inc
= mul_s16_s16(inc
, h_i_val
);
556 #if !(defined(CPU_SH) || defined(TEST_SH_MATH))
557 /* round and scale values, and accumulate or store to output */
560 out_line
[ox
] += (val
+ (1 << 21)) >> 22;
562 out_line
[ox
] = (val
+ (1 << 21)) >> 22;
565 /* round and scale values, and accumulate or store to output */
579 /* vertical linear scaler */
580 static inline bool scale_v_linear(struct rowset
*rset
,
581 struct scaler_context
*ctx
)
585 #if defined(CPU_SH) || defined (TEST_SH_MATH)
586 const uint32_t v_i_val
= ctx
->src
->height
- 1,
587 v_o_val
= ctx
->bm
->height
- 1;
589 const uint32_t v_i_val
= ctx
->v_i_val
,
590 v_o_val
= ctx
->v_o_val
;
592 /* Set up our buffers, to store the increment and current value for each
593 column, and one temp buffer used to read in new rows.
595 #ifdef HAVE_LCD_COLOR
596 uint32_t *rowinc
= (uint32_t *)(ctx
->buf
),
597 *rowval
= rowinc
+ 3 * ctx
->bm
->width
,
598 *rowtmp
= rowval
+ 3 * ctx
->bm
->width
,
600 uint32_t *rowinc
= (uint32_t *)(ctx
->buf
),
601 *rowval
= rowinc
+ ctx
->bm
->width
,
602 *rowtmp
= rowval
+ ctx
->bm
->width
,
604 *rowinc_px
, *rowval_px
, *rowtmp_px
;
606 SDEBUGF("scale_v_linear\n");
609 /* get first scaled row in rowtmp */
610 if(!ctx
->h_scaler((void*)rowtmp
, ctx
, false))
612 for (oy
= rset
->rowstart
; oy
!= rset
->rowstop
; oy
+= rset
->rowstep
)
618 for(rowinc_px
= rowinc
, rowtmp_px
= rowtmp
, rowval_px
= rowval
;
619 rowinc_px
< rowval
; rowinc_px
++, rowtmp_px
++, rowval_px
++)
621 *rowinc_px
= -*rowtmp_px
;
622 *rowval_px
= *rowtmp_px
* v_o_val
;
624 if (iy
< (uint32_t)ctx
->src
->height
)
626 if (!ctx
->h_scaler((void*)rowtmp
, ctx
, false))
628 for(rowinc_px
= rowinc
, rowtmp_px
= rowtmp
, rowval_px
= rowval
;
629 rowinc_px
< rowval
; rowinc_px
++, rowtmp_px
++, rowval_px
++)
631 *rowinc_px
+= *rowtmp_px
;
632 *rowval_px
+= *rowinc_px
* iye
;
633 *rowinc_px
*= v_i_val
;
637 for(rowinc_px
= rowinc
, rowval_px
= rowval
; rowinc_px
< rowval
;
638 rowinc_px
++, rowval_px
++)
639 *rowval_px
+= *rowinc_px
;
640 ctx
->output_row(oy
, (void*)rowval
, ctx
);
645 #endif /* HAVE_UPSCALER */
647 #if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
648 static void output_row_32_native_fromyuv(uint32_t row
, void * row_in
,
649 struct scaler_context
*ctx
)
651 #if defined(LCD_STRIDEFORMAT) && LCD_STRIDEFORMAT == VERTICAL_STRIDE
652 #define DEST_STEP (ctx->bm->height)
655 #define DEST_STEP (1)
656 #define Y_STEP (BM_WIDTH(ctx->bm->width,FORMAT_NATIVE,0))
660 uint8_t dy
= DITHERY(row
);
661 struct uint32_rgb
*qp
= (struct uint32_rgb
*)row_in
;
662 SDEBUGF("output_row: y: %lu in: %p\n",row
, row_in
);
663 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ Y_STEP
* row
;
665 unsigned r
, g
, b
, y
, u
, v
;
667 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
669 delta
= DITHERXDY(col
,dy
);
670 y
= SC_OUT(qp
->b
, ctx
);
671 u
= SC_OUT(qp
->g
, ctx
);
672 v
= SC_OUT(qp
->r
, ctx
);
674 yuv_to_rgb(y
, u
, v
, &r
, &g
, &b
);
675 r
= (31 * r
+ (r
>> 3) + delta
) >> 8;
676 g
= (63 * g
+ (g
>> 2) + delta
) >> 8;
677 b
= (31 * b
+ (b
>> 3) + delta
) >> 8;
678 *dest
= LCD_RGBPACK_LCD(r
, g
, b
);
684 #if !defined(PLUGIN) || LCD_DEPTH > 1
685 static void output_row_32_native(uint32_t row
, void * row_in
,
686 struct scaler_context
*ctx
)
689 int fb_width
= BM_WIDTH(ctx
->bm
->width
,FORMAT_NATIVE
,0);
690 uint8_t dy
= DITHERY(row
);
691 #ifdef HAVE_LCD_COLOR
692 struct uint32_rgb
*qp
= (struct uint32_rgb
*)row_in
;
694 uint32_t *qp
= (uint32_t*)row_in
;
696 SDEBUGF("output_row: y: %lu in: %p\n",row
, row_in
);
698 #if LCD_PIXELFORMAT == HORIZONTAL_PACKING
699 /* greyscale iPods */
700 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
* row
;
706 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
708 delta
= DITHERXDY(col
,dy
);
709 bright
= SC_OUT(*qp
++, ctx
);
710 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
711 data
|= (~bright
& 3) << shift
;
721 #elif LCD_PIXELFORMAT == VERTICAL_PACKING
723 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
*
725 int shift
= 2 * (row
& 3);
729 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
731 delta
= DITHERXDY(col
,dy
);
732 bright
= SC_OUT(*qp
++, ctx
);
733 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
734 *dest
++ |= (~bright
& 3) << shift
;
736 #elif LCD_PIXELFORMAT == VERTICAL_INTERLEAVED
738 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
*
744 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
746 delta
= DITHERXDY(col
,dy
);
747 bright
= SC_OUT(*qp
++, ctx
);
748 bright
= (3 * bright
+ (bright
>> 6) + delta
) >> 8;
749 *dest
++ |= vi_pattern
[bright
] << shift
;
751 #endif /* LCD_PIXELFORMAT */
752 #elif LCD_DEPTH == 16
754 #if defined(LCD_STRIDEFORMAT) && LCD_STRIDEFORMAT == VERTICAL_STRIDE
757 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ row
;
760 struct uint32_rgb q0
;
762 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
764 delta
= DITHERXDY(col
,dy
);
766 r
= SC_OUT(q0
.r
, ctx
);
767 g
= SC_OUT(q0
.g
, ctx
);
768 b
= SC_OUT(q0
.b
, ctx
);
769 r
= (31 * r
+ (r
>> 3) + delta
) >> 8;
770 g
= (63 * g
+ (g
>> 2) + delta
) >> 8;
771 b
= (31 * b
+ (b
>> 3) + delta
) >> 8;
772 *dest
= LCD_RGBPACK_LCD(r
, g
, b
);
773 dest
+= ctx
->bm
->height
;
776 /* iriver h300, colour iPods, X5 */
777 fb_data
*dest
= (fb_data
*)ctx
->bm
->data
+ fb_width
* row
;
780 struct uint32_rgb q0
;
782 for (col
= 0; col
< ctx
->bm
->width
; col
++) {
784 delta
= DITHERXDY(col
,dy
);
786 r
= SC_OUT(q0
.r
, ctx
);
787 g
= SC_OUT(q0
.g
, ctx
);
788 b
= SC_OUT(q0
.b
, ctx
);
789 r
= (31 * r
+ (r
>> 3) + delta
) >> 8;
790 g
= (63 * g
+ (g
>> 2) + delta
) >> 8;
791 b
= (31 * b
+ (b
>> 3) + delta
) >> 8;
792 *dest
++ = LCD_RGBPACK_LCD(r
, g
, b
);
796 #endif /* LCD_DEPTH */
800 #if defined(PLUGIN) && LCD_DEPTH > 1
801 unsigned int get_size_native(struct bitmap
*bm
)
803 return BM_SIZE(bm
->width
,bm
->height
,FORMAT_NATIVE
,0);
806 const struct custom_format format_native
= {
807 .output_row_8
= output_row_8_native
,
808 #if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
810 output_row_32_native
,
811 output_row_32_native_fromyuv
814 .output_row_32
= output_row_32_native
,
816 .get_size
= get_size_native
820 int resize_on_load(struct bitmap
*bm
, bool dither
, struct dim
*src
,
821 struct rowset
*rset
, unsigned char *buf
, unsigned int len
,
822 const struct custom_format
*format
,
823 IF_PIX_FMT(int format_index
,)
824 struct img_part
* (*store_part
)(void *args
),
827 const int sw
= src
->width
;
828 const int sh
= src
->height
;
829 const int dw
= bm
->width
;
830 const int dh
= bm
->height
;
832 #ifdef HAVE_LCD_COLOR
833 unsigned int needed
= sizeof(struct uint32_rgb
) * 3 * bm
->width
;
835 unsigned int needed
= sizeof(uint32_t) * 3 * bm
->width
;
837 #if MAX_SC_STACK_ALLOC
838 uint8_t sc_buf
[(needed
<= len
|| needed
> MAX_SC_STACK_ALLOC
) ?
841 ALIGN_BUFFER(buf
, len
, sizeof(uint32_t));
844 #if MAX_SC_STACK_ALLOC
845 if (needed
> MAX_SC_STACK_ALLOC
)
847 DEBUGF("unable to allocate required buffer: %d needed, "
848 "%d available, %d permitted from stack\n",
849 needed
, len
, MAX_SC_STACK_ALLOC
);
852 if (sizeof(sc_buf
) < needed
)
854 DEBUGF("failed to allocate large enough buffer on stack: "
855 "%d needed, only got %d",
856 needed
, MAX_SC_STACK_ALLOC
);
860 DEBUGF("unable to allocate required buffer: %d needed, "
861 "%d available\n", needed
, len
);
866 struct scaler_context ctx
;
867 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
870 ctx
.store_part
= store_part
;
872 #if MAX_SC_STACK_ALLOC
873 ctx
.buf
= needed
> len
? sc_buf
: buf
;
881 #if defined(CPU_SH) || defined (TEST_SH_MATH)
885 #if defined(HAVE_LCD_COLOR) && defined(HAVE_JPEG)
886 ctx
.output_row
= format_index
? output_row_32_native_fromyuv
887 : output_row_32_native
;
889 ctx
.output_row
= output_row_32_native
;
893 #ifdef HAVE_LCD_COLOR
894 ctx
.output_row
= format
->output_row_32
[format_index
];
896 ctx
.output_row
= format
->output_row_32
;
902 ctx
.h_scaler
= scale_h_area
;
903 #if defined(CPU_SH) || defined (TEST_SH_MATH)
906 uint32_t h_div
= (1U << 24) / sw
;
907 ctx
.h_i_val
= sw
* h_div
;
908 ctx
.h_o_val
= dw
* h_div
;
912 ctx
.h_scaler
= scale_h_linear
;
913 #if defined(CPU_SH) || defined (TEST_SH_MATH)
916 uint32_t h_div
= (1U << 24) / (dw
- 1);
917 ctx
.h_i_val
= (sw
- 1) * h_div
;
918 ctx
.h_o_val
= (dw
- 1) * h_div
;
923 unsigned old_macsr
= coldfire_get_macsr();
924 coldfire_set_macsr(EMAC_UNSIGNED
);
930 #if defined(CPU_SH) || defined (TEST_SH_MATH)
932 ctx
.recip
= ((uint32_t)(-div
)) / div
+ 1;
934 uint32_t v_div
= (1U << 22) / sh
;
935 ctx
.v_i_val
= sh
* v_div
;
936 ctx
.v_o_val
= dh
* v_div
;
938 ret
= scale_v_area(rset
, &ctx
);
943 #if defined(CPU_SH) || defined (TEST_SH_MATH)
945 ctx
.recip
= ((uint32_t)(-div
)) / div
+ 1;
947 uint32_t v_div
= (1U << 22) / dh
;
948 ctx
.v_i_val
= (sh
- 1) * v_div
;
949 ctx
.v_o_val
= (dh
- 1) * v_div
;
951 ret
= scale_v_linear(rset
, &ctx
);
955 /* Restore emac status; other modules like tone control filter
956 * calculation may rely on it. */
957 coldfire_set_macsr(old_macsr
);
959 #ifdef HAVE_ADJUSTABLE_CPU_FREQ