/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2008 by Akio Idehara, Andrew Mahone
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/****************************************************************************
 * Implementation of area average and linear row and vertical scalers, and
 * nearest-neighbor grey scaler (C) 2008 Andrew Mahone
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifdef HAVE_REMOTE_LCD
#include "lcd-remote.h"
#endif
#ifdef ROCKBOX_DEBUG_SCALERS
#define SDEBUGF DEBUGF
#endif

#include <jpeg_load.h>
#if CONFIG_CPU == SH7034
/* 16*16->32 bit multiplication is a single instruction on the SH1 */
#define MULUQ(a, b) ((uint32_t) (((uint16_t) (a)) * ((uint16_t) (b))))
#define MULQ(a, b)  ((int32_t) (((int16_t) (a)) * ((int16_t) (b))))
#else
#define MULUQ(a, b) ((a) * (b))
#define MULQ(a, b)  ((a) * (b))
#endif
/* calculate the maximum dimensions which will preserve the aspect ratio of
   src while fitting in the constraints passed in dst, and store result in dst,
   returning 0 if rounding and 1 if not rounding.
 */
int recalc_dimension(struct dim *dst, struct dim *src)
{
    /* This only looks backwards. The input image size is being pre-scaled by
     * the inverse of the pixel aspect ratio, so that once the size is scaled
     * to meet the output constraints, the scaled image will have appropriate
     * proportions.
     */
    int sw = src->width  * LCD_PIXEL_ASPECT_HEIGHT;
    int sh = src->height * LCD_PIXEL_ASPECT_WIDTH;

    dst->width  = LCD_WIDTH;
    dst->height = LCD_HEIGHT;

    if (dst->width > sw || dst->height > sh)

    if (sw == dst->width && sh == dst->height)

    tmp = (sw * dst->height + (sh >> 1)) / sh;
    if (tmp > dst->width)
        dst->height = (sh * dst->width + (sw >> 1)) / sw;

    return src->width == dst->width && src->height == dst->height;
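
/* A worked example of the rounded fit above, assuming a hypothetical 400x300
 * source, square pixels (equal LCD_PIXEL_ASPECT_* values) and a 176x132 LCD:
 *   sw = 400, sh = 300, dst starts at 176x132
 *   tmp = (400 * 132 + 150) / 300 = 52950 / 300 = 176
 *   tmp is not greater than dst->width, so the output stays 176x132, which
 *   preserves the 4:3 ratio of the source exactly, and the function returns 0
 *   because 176x132 differs from the 400x300 source.
 */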
/* All of these scalers use variations of Bresenham's algorithm to convert from
   their input to output coordinates. The error value is shifted from the
   "classic" version such that it is a useful input to the scaling calculation.
 */
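
/* A sketch of the idea with assumed sizes (not taken from the original
 * comments): when shrinking 5 input pixels to 2 output pixels, the error is
 * advanced by the output size (2) for every input pixel and an output pixel is
 * completed whenever it reaches the input size (5). The amount left over at
 * that point is exactly the fraction of the current input pixel that spills
 * into the next output pixel, so it can be fed directly into the weighted sums
 * below instead of being discarded as in the classic formulation.
 */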
#ifdef HAVE_LCD_COLOR
/* dither + pack one channel of RGB565; R and B share a packing macro */
#define PACKRB(v, delta)  ((31 * v + (v >> 3) + delta) >> 8)
#define PACKG(g, delta)   ((63 * g + (g >> 2) + delta) >> 8)
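
/* Worked example with illustrative values: PACKRB reduces an 8-bit channel to
 * 5 bits, since 31/255 is very nearly (31 + 1/8)/256, so
 * (31 * v + (v >> 3)) >> 8 ~= v * 31 / 255, with delta as the per-pixel dither
 * offset added before the shift. For v = 255, delta = 0: (7905 + 31) >> 8 = 31;
 * for v = 128, delta = 0: (3968 + 16) >> 8 = 15. PACKG does the same to 6 bits
 * using 63/255 ~= (63 + 1/4)/256.
 */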
/* read new img_part unconditionally, return false on failure */
#define FILL_BUF_INIT(img_part, store_part, args) { \
    img_part = store_part(args); \
    if (img_part == NULL) \
        return false; \
}

/* read new img_part if current one is empty, return false on failure */
#define FILL_BUF(img_part, store_part, args) { \
    if (img_part->len == 0) \
        img_part = store_part(args); \
    if (img_part == NULL) \
        return false; \
}
#if defined(CPU_COLDFIRE)
#define MAC(op1, op2, num) \
    asm volatile( \
        "mac.l %0, %1, %%acc" #num \
        : \
        : "%d" (op1), "d" (op2)\
    )
#define MAC_OUT(dest, num) \
    asm volatile( \
        "movclr.l %%acc" #num ", %0" \
        : "=d" (dest) \
    )
#elif defined(CPU_SH)
/* calculate the 32-bit product of signed 16-bit op1 and op2 */
static inline int32_t mul_s16_s16(int16_t op1, int16_t op2)
{
    return (int32_t)(op1 * op2);
}

/* calculate the 32-bit product of unsigned 16-bit op1 and op2 */
static inline uint32_t mul_u16_u16(uint16_t op1, uint16_t op2)
{
    return (uint32_t)(op1 * op2);
}
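
/* Note (an inference from the surrounding SH-1 code, not an original comment):
 * keeping the operands as explicit 16-bit types lets the compiler emit the
 * single-instruction 16*16->32 multiply mentioned at the top of the file
 * instead of a full 32-bit multiply. For example, mul_u16_u16(640, 65535)
 * = 41942400, which still fits comfortably in the 32-bit result.
 */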
/* horizontal area average scaler */
static bool scale_h_area(void *out_line_ptr,
                         struct scaler_context *ctx, bool accum)
{
    SDEBUGF("scale_h_area\n");
    unsigned int ix, ox, oxe, mul;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
    const uint32_t h_i_val = ctx->src->width,
                   h_o_val = ctx->bm->width;
#else
    const uint32_t h_i_val = ctx->h_i_val,
                   h_o_val = ctx->h_o_val;
#endif
#ifdef HAVE_LCD_COLOR
    struct uint32_rgb rgbvalacc = { 0, 0, 0 },
                      rgbvaltmp = { 0, 0, 0 },
                      *out_line = (struct uint32_rgb *)out_line_ptr;
#else
    uint32_t acc = 0, tmp = 0, *out_line = (uint32_t*)out_line_ptr;
#endif
    struct img_part *part;
    FILL_BUF_INIT(part, ctx->store_part, ctx->args);

    /* give other tasks a chance to run */
    for (ix = 0; ix < (unsigned int)ctx->src->width; ix++)
        /* end of current area has been reached */
        /* fill buffer if needed */
        FILL_BUF(part, ctx->store_part, ctx->args);
#ifdef HAVE_LCD_COLOR
            /* "reset" error, which now represents partial coverage of next
               pixel by the next area
            */
#if defined(CPU_COLDFIRE)
            /* Coldfire EMAC math */
            /* add saved partial pixel from start of area */
            MAC(rgbvalacc.r, h_o_val, 0);
            MAC(rgbvalacc.g, h_o_val, 1);
            MAC(rgbvalacc.b, h_o_val, 2);
            MAC(rgbvaltmp.r, mul, 0);
            MAC(rgbvaltmp.g, mul, 1);
            MAC(rgbvaltmp.b, mul, 2);
            /* get new pixel, then add its partial coverage to this area */
            rgbvaltmp.r = part->buf->red;
            rgbvaltmp.g = part->buf->green;
            rgbvaltmp.b = part->buf->blue;
            MAC(rgbvaltmp.r, mul, 0);
            MAC(rgbvaltmp.g, mul, 1);
            MAC(rgbvaltmp.b, mul, 2);
            MAC_OUT(rgbvalacc.r, 0);
            MAC_OUT(rgbvalacc.g, 1);
            MAC_OUT(rgbvalacc.b, 2);
#else
            /* add saved partial pixel from start of area */
            rgbvalacc.r = rgbvalacc.r * h_o_val + rgbvaltmp.r * mul;
            rgbvalacc.g = rgbvalacc.g * h_o_val + rgbvaltmp.g * mul;
            rgbvalacc.b = rgbvalacc.b * h_o_val + rgbvaltmp.b * mul;
            /* get new pixel, then add its partial coverage to this area */
            rgbvaltmp.r = part->buf->red;
            rgbvaltmp.g = part->buf->green;
            rgbvaltmp.b = part->buf->blue;
            rgbvalacc.r += rgbvaltmp.r * mul;
            rgbvalacc.g += rgbvaltmp.g * mul;
            rgbvalacc.b += rgbvaltmp.b * mul;
#endif
            rgbvalacc.r = (rgbvalacc.r + (1 << 21)) >> 22;
            rgbvalacc.g = (rgbvalacc.g + (1 << 21)) >> 22;
            rgbvalacc.b = (rgbvalacc.b + (1 << 21)) >> 22;
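            /* A worked illustration of the rounding above (example numbers
               only): the accumulated channel is divided by 2^22 with a right
               shift, and adding 1 << 21 (half the divisor) first makes it a
               round-to-nearest rather than a truncation; e.g.
               (7340032 + 2097152) >> 22 = 2, where 7340032 >> 22 alone would
               truncate to 1.
            */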
            /* store or accumulate to output row */
            rgbvalacc.r += out_line[ox].r;
            rgbvalacc.g += out_line[ox].g;
            rgbvalacc.b += out_line[ox].b;

            out_line[ox].r = rgbvalacc.r;
            out_line[ox].g = rgbvalacc.g;
            out_line[ox].b = rgbvalacc.b;
            /* reset accumulator */

            /* add pixel value to accumulator */
            rgbvalacc.r += part->buf->red;
            rgbvalacc.g += part->buf->green;
            rgbvalacc.b += part->buf->blue;
            /* "reset" error, which now represents partial coverage of next
               pixel by the next area
            */
#if defined(CPU_COLDFIRE)
            /* Coldfire EMAC math */
            /* add saved partial pixel from start of area */
            MAC(acc, h_o_val, 0);
            /* get new pixel, then add its partial coverage to this area */
#elif defined(CPU_SH)
            /* SH-1 16x16->32 math */
            /* add saved partial pixel from start of area */
            acc = mul_u16_u16(acc, h_o_val) + mul_u16_u16(tmp, mul);
            /* get new pixel, then add its partial coverage to this area */
            acc += mul_u16_u16(tmp, mul);
#else
            /* add saved partial pixel from start of area */
            acc = (acc * h_o_val) + (tmp * mul);
            /* get new pixel, then add its partial coverage to this area */
#endif
#if !(defined(CPU_SH) || defined(TEST_SH_MATH))
            /* round, divide, and either store or accumulate to output row */
            acc = (acc + (1 << 21)) >> 22;

            /* reset accumulator */

            /* add pixel value to accumulator */
/* vertical area average scaler */
static inline bool scale_v_area(struct rowset *rset, struct scaler_context *ctx)
{
    uint32_t mul, oy, iy, oye;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
    const uint32_t v_i_val = ctx->src->height,
                   v_o_val = ctx->bm->height;
#else
    const uint32_t v_i_val = ctx->v_i_val,
                   v_o_val = ctx->v_o_val;
#endif

    /* Set up rounding and scale factors */
#ifdef HAVE_LCD_COLOR
    uint32_t *rowacc = (uint32_t *) ctx->buf,
             *rowtmp = rowacc + 3 * ctx->bm->width,
             *rowacc_px, *rowtmp_px;
    memset((void *)ctx->buf, 0, ctx->bm->width * 2 * sizeof(struct uint32_rgb));
#else
    uint32_t *rowacc = (uint32_t *) ctx->buf,
             *rowtmp = rowacc + ctx->bm->width,
             *rowacc_px, *rowtmp_px;
    memset((void *)ctx->buf, 0, ctx->bm->width * 2 * sizeof(uint32_t));
#endif
    SDEBUGF("scale_v_area\n");
    /* zero the accumulator and temp rows */
    for (iy = 0; iy < (unsigned int)ctx->src->height; iy++)
        /* end of current area has been reached */
            /* "reset" error, which now represents partial coverage of the next
               row by the next area
            */
            /* add stored partial row to accumulator */
            for(rowacc_px = rowacc, rowtmp_px = rowtmp; rowacc_px != rowtmp;
                rowacc_px++, rowtmp_px++)
                *rowacc_px = *rowacc_px * v_o_val + *rowtmp_px * mul;
            /* store new scaled row in temp row */
            if(!ctx->h_scaler(rowtmp, ctx, false))

            /* add partial coverage by new row to this area, then round and
               scale the result
            */
            for(rowacc_px = rowacc, rowtmp_px = rowtmp; rowacc_px != rowtmp;
                rowacc_px++, rowtmp_px++)
                *rowacc_px += mul * *rowtmp_px;
            ctx->output_row(oy, (void*)rowacc, ctx);
            /* clear accumulator row, store partial coverage for next row */
#ifdef HAVE_LCD_COLOR
            memset((void *)rowacc, 0, ctx->bm->width * sizeof(uint32_t) * 3);
#else
            memset((void *)rowacc, 0, ctx->bm->width * sizeof(uint32_t));
#endif
        /* accumulate new scaled row to rowacc */
        if (!ctx->h_scaler(rowacc, ctx, true))
/* horizontal linear scaler */
static bool scale_h_linear(void *out_line_ptr, struct scaler_context *ctx,
                           bool accum)
{
    unsigned int ix, ox, ixe;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
    const uint32_t h_i_val = ctx->src->width - 1,
                   h_o_val = ctx->bm->width - 1;
#else
    const uint32_t h_i_val = ctx->h_i_val,
                   h_o_val = ctx->h_o_val;
#endif
    /* type x = x is an ugly hack for hiding an uninitialized data warning. The
       values are conditionally initialized before use, but other values are
       set such that this will occur before these are used.
    */
#ifdef HAVE_LCD_COLOR
    struct uint32_rgb rgbval = rgbval, rgbinc = rgbinc,
                      *out_line = (struct uint32_rgb *)out_line_ptr;
#else
    uint32_t val = val, inc = inc, *out_line = (uint32_t*)out_line_ptr;
#endif
    struct img_part *part;
    SDEBUGF("scale_h_linear\n");
    FILL_BUF_INIT(part, ctx->store_part, ctx->args);

    /* The error is set so that values are initialized on the first pass. */
    /* give other tasks a chance to run */
    for (ox = 0; ox < (uint32_t)ctx->bm->width; ox++)
#ifdef HAVE_LCD_COLOR
            /* Store the new "current" pixel value in rgbval, and the color
               step value in rgbinc.
            */
            rgbinc.r = -(part->buf->red);
            rgbinc.g = -(part->buf->green);
            rgbinc.b = -(part->buf->blue);
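            /* An inference from the code, not an original comment: negating
               the current pixel here means that once the next source pixel is
               added below, rgbinc holds the difference (next - current), i.e.
               the colour step used to interpolate between the two samples.
            */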
#if defined(CPU_COLDFIRE)
            /* Coldfire EMAC math */
            MAC(part->buf->red, h_o_val, 0);
            MAC(part->buf->green, h_o_val, 1);
            MAC(part->buf->blue, h_o_val, 2);
#else
            rgbval.r = (part->buf->red) * h_o_val;
            rgbval.g = (part->buf->green) * h_o_val;
            rgbval.b = (part->buf->blue) * h_o_val;
#endif
            /* If this wasn't the last pixel, add the next one to rgbinc. */
            if (LIKELY(ix < (uint32_t)ctx->src->width)) {
                /* Fetch new pixels if needed */
                FILL_BUF(part, ctx->store_part, ctx->args);
                rgbinc.r += part->buf->red;
                rgbinc.g += part->buf->green;
                rgbinc.b += part->buf->blue;
                /* Add a partial step to rgbval, if this pixel isn't precisely
                   aligned with the new source pixel
                */
#if defined(CPU_COLDFIRE)
                /* Coldfire EMAC math */
                MAC(rgbinc.r, ixe, 0);
                MAC(rgbinc.g, ixe, 1);
                MAC(rgbinc.b, ixe, 2);
#else
                rgbval.r += rgbinc.r * ixe;
                rgbval.g += rgbinc.g * ixe;
                rgbval.b += rgbinc.b * ixe;
#endif
#if defined(CPU_COLDFIRE)
            /* get final EMAC result out of ACC registers */
            MAC_OUT(rgbval.r, 0);
            MAC_OUT(rgbval.g, 1);
            MAC_OUT(rgbval.b, 2);
            /* Now multiply the color increment to its proper value */

            rgbval.r += rgbinc.r;
            rgbval.g += rgbinc.g;
            rgbval.b += rgbinc.b;

            /* round and scale values, and accumulate or store to output */
            out_line[ox].r += (rgbval.r + (1 << 21)) >> 22;
            out_line[ox].g += (rgbval.g + (1 << 21)) >> 22;
            out_line[ox].b += (rgbval.b + (1 << 21)) >> 22;

            out_line[ox].r = (rgbval.r + (1 << 21)) >> 22;
            out_line[ox].g = (rgbval.g + (1 << 21)) >> 22;
            out_line[ox].b = (rgbval.b + (1 << 21)) >> 22;
            /* Store the new "current" pixel value in rgbval, and the color
               step value in rgbinc.
            */
#if defined(CPU_COLDFIRE)
            /* Coldfire EMAC math */
            MAC(val, h_o_val, 0);
#elif defined(CPU_SH)
            /* SH-1 16x16->32 math */
            val = mul_u16_u16(val, h_o_val);

            /* If this wasn't the last pixel, add the next one to rgbinc. */
            if (LIKELY(ix < (uint32_t)ctx->src->width)) {
                /* Fetch new pixels if needed */
                FILL_BUF(part, ctx->store_part, ctx->args);
                /* Add a partial step to rgbval, if this pixel isn't precisely
                   aligned with the new source pixel
                */
#if defined(CPU_COLDFIRE)
                /* Coldfire EMAC math */
#elif defined(CPU_SH)
                /* SH-1 16x16->32 math */
                val += mul_s16_s16(inc, ixe);

#if defined(CPU_COLDFIRE)
            /* get final EMAC result out of ACC register */
            /* Now multiply the color increment to its proper value */
            /* SH-1 16x16->32 math */
            inc = mul_s16_s16(inc, h_i_val);

#if !(defined(CPU_SH) || defined(TEST_SH_MATH))
            /* round and scale values, and accumulate or store to output */
            out_line[ox] += (val + (1 << 21)) >> 22;

            out_line[ox] = (val + (1 << 21)) >> 22;

            /* round and scale values, and accumulate or store to output */
/* vertical linear scaler */
static inline bool scale_v_linear(struct rowset *rset,
                                  struct scaler_context *ctx)
{
    uint32_t mul, iy, iye;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
    const uint32_t v_i_val = ctx->src->height - 1,
                   v_o_val = ctx->bm->height - 1;
#else
    const uint32_t v_i_val = ctx->v_i_val,
                   v_o_val = ctx->v_o_val;
#endif
    /* Set up our buffers, to store the increment and current value for each
       column, and one temp buffer used to read in new rows.
    */
#ifdef HAVE_LCD_COLOR
    uint32_t *rowinc = (uint32_t *)(ctx->buf),
             *rowval = rowinc + 3 * ctx->bm->width,
             *rowtmp = rowval + 3 * ctx->bm->width,
#else
    uint32_t *rowinc = (uint32_t *)(ctx->buf),
             *rowval = rowinc + ctx->bm->width,
             *rowtmp = rowval + ctx->bm->width,
#endif
             *rowinc_px, *rowval_px, *rowtmp_px;
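
    /* Layout sketch of ctx->buf as set up above, using the colour case as the
       example: three consecutive blocks of 3 * bm->width entries each,
       [rowinc | rowval | rowtmp], where rowinc holds the per-column step,
       rowval the current interpolated column values, and rowtmp the most
       recently read scaled row.
    */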
    SDEBUGF("scale_v_linear\n");

    /* get first scaled row in rowtmp */
    if(!ctx->h_scaler((void*)rowtmp, ctx, false))

    for (oy = rset->rowstart; oy != rset->rowstop; oy += rset->rowstep)
            for(rowinc_px = rowinc, rowtmp_px = rowtmp, rowval_px = rowval;
                rowinc_px < rowval; rowinc_px++, rowtmp_px++, rowval_px++)
            {
                *rowinc_px = -*rowtmp_px;
                *rowval_px = *rowtmp_px * v_o_val;
            }
            if (iy < (uint32_t)ctx->src->height)
                if (!ctx->h_scaler((void*)rowtmp, ctx, false))
                for(rowinc_px = rowinc, rowtmp_px = rowtmp, rowval_px = rowval;
                    rowinc_px < rowval; rowinc_px++, rowtmp_px++, rowval_px++)
                {
                    *rowinc_px += *rowtmp_px;
                    *rowval_px += *rowinc_px * iye;
                    *rowinc_px *= v_i_val;
                }

        for(rowinc_px = rowinc, rowval_px = rowval; rowinc_px < rowval;
            rowinc_px++, rowval_px++)
            *rowval_px += *rowinc_px;
        ctx->output_row(oy, (void*)rowval, ctx);

#endif /* HAVE_UPSCALER */
#if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
static void output_row_32_native_fromyuv(uint32_t row, void * row_in,
                                         struct scaler_context *ctx)
{
#if defined(LCD_STRIDEFORMAT) && LCD_STRIDEFORMAT == VERTICAL_STRIDE
#define DEST_STEP (ctx->bm->height)
#else
#define DEST_STEP (1)
#define Y_STEP (BM_WIDTH(ctx->bm->width,FORMAT_NATIVE,0))
#endif
    uint8_t dy = DITHERY(row);
    struct uint32_rgb *qp = (struct uint32_rgb *)row_in;
    SDEBUGF("output_row: y: %lu in: %p\n", row, row_in);
    fb_data *dest = (fb_data *)ctx->bm->data + Y_STEP * row;
    unsigned r, g, b, y, u, v;

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        y = SC_OUT(qp->b, ctx);
        u = SC_OUT(qp->g, ctx);
        v = SC_OUT(qp->r, ctx);
        yuv_to_rgb(y, u, v, &r, &g, &b);
        r = (31 * r + (r >> 3) + delta) >> 8;
        g = (63 * g + (g >> 2) + delta) >> 8;
        b = (31 * b + (b >> 3) + delta) >> 8;
        *dest = LCD_RGBPACK_LCD(r, g, b);
#if !defined(PLUGIN) || LCD_DEPTH > 1
static void output_row_32_native(uint32_t row, void * row_in,
                                 struct scaler_context *ctx)
{
    int fb_width = BM_WIDTH(ctx->bm->width,FORMAT_NATIVE,0);
    uint8_t dy = DITHERY(row);
#ifdef HAVE_LCD_COLOR
    struct uint32_rgb *qp = (struct uint32_rgb *)row_in;
#else
    uint32_t *qp = (uint32_t*)row_in;
#endif
    SDEBUGF("output_row: y: %lu in: %p\n", row, row_in);
#if LCD_PIXELFORMAT == HORIZONTAL_PACKING
    /* greyscale iPods */
    fb_data *dest = (fb_data *)ctx->bm->data + fb_width * row;

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        bright = SC_OUT(*qp++, ctx);
        bright = (3 * bright + (bright >> 6) + delta) >> 8;
        data |= (~bright & 3) << shift;
#elif LCD_PIXELFORMAT == VERTICAL_PACKING
    fb_data *dest = (fb_data *)ctx->bm->data + fb_width *
    int shift = 2 * (row & 3);

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        bright = SC_OUT(*qp++, ctx);
        bright = (3 * bright + (bright >> 6) + delta) >> 8;
        *dest++ |= (~bright & 3) << shift;
#elif LCD_PIXELFORMAT == VERTICAL_INTERLEAVED
    fb_data *dest = (fb_data *)ctx->bm->data + fb_width *

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        bright = SC_OUT(*qp++, ctx);
        bright = (3 * bright + (bright >> 6) + delta) >> 8;
        *dest++ |= vi_pattern[bright] << shift;
#endif /* LCD_PIXELFORMAT */
#elif LCD_DEPTH == 16
#if defined(LCD_STRIDEFORMAT) && LCD_STRIDEFORMAT == VERTICAL_STRIDE
    fb_data *dest = (fb_data *)ctx->bm->data + row;
    struct uint32_rgb q0;

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        r = SC_OUT(q0.r, ctx);
        g = SC_OUT(q0.g, ctx);
        b = SC_OUT(q0.b, ctx);
        r = (31 * r + (r >> 3) + delta) >> 8;
        g = (63 * g + (g >> 2) + delta) >> 8;
        b = (31 * b + (b >> 3) + delta) >> 8;
        *dest = LCD_RGBPACK_LCD(r, g, b);
        dest += ctx->bm->height;
#else
    /* iriver h300, colour iPods, X5 */
    fb_data *dest = (fb_data *)ctx->bm->data + fb_width * row;
    struct uint32_rgb q0;

    for (col = 0; col < ctx->bm->width; col++) {
        delta = DITHERXDY(col, dy);
        r = SC_OUT(q0.r, ctx);
        g = SC_OUT(q0.g, ctx);
        b = SC_OUT(q0.b, ctx);
        r = (31 * r + (r >> 3) + delta) >> 8;
        g = (63 * g + (g >> 2) + delta) >> 8;
        b = (31 * b + (b >> 3) + delta) >> 8;
        *dest++ = LCD_RGBPACK_LCD(r, g, b);
#endif /* LCD_DEPTH */
#if defined(PLUGIN) && LCD_DEPTH > 1
unsigned int get_size_native(struct bitmap *bm)
{
    return BM_SIZE(bm->width,bm->height,FORMAT_NATIVE,0);
}

const struct custom_format format_native = {
    .output_row_8 = output_row_8_native,
#if defined(HAVE_LCD_COLOR) && (defined(HAVE_JPEG) || defined(PLUGIN))
    .output_row_32 = {
        output_row_32_native,
        output_row_32_native_fromyuv
    },
#else
    .output_row_32 = output_row_32_native,
#endif
    .get_size = get_size_native
};
int resize_on_load(struct bitmap *bm, bool dither, struct dim *src,
                   struct rowset *rset, unsigned char *buf, unsigned int len,
                   const struct custom_format *format,
                   IF_PIX_FMT(int format_index,)
                   struct img_part* (*store_part)(void *args),

    const int sw = src->width;
    const int sh = src->height;
    const int dw = bm->width;
    const int dh = bm->height;
#ifdef HAVE_LCD_COLOR
    unsigned int needed = sizeof(struct uint32_rgb) * 3 * bm->width;
#else
    unsigned int needed = sizeof(uint32_t) * 3 * bm->width;
#endif
#if MAX_SC_STACK_ALLOC
    uint8_t sc_buf[(needed <= len || needed > MAX_SC_STACK_ALLOC) ?
                   0 : needed];
#endif
    ALIGN_BUFFER(buf, len, sizeof(uint32_t));
#if MAX_SC_STACK_ALLOC
    if (needed > MAX_SC_STACK_ALLOC)
        DEBUGF("unable to allocate required buffer: %d needed, "
               "%d available, %d permitted from stack\n",
               needed, len, MAX_SC_STACK_ALLOC);
    if (sizeof(sc_buf) < needed)
        DEBUGF("failed to allocate large enough buffer on stack: "
               "%d needed, only got %d",
               needed, MAX_SC_STACK_ALLOC);
    DEBUGF("unable to allocate required buffer: %d needed, "
           "%d available\n", needed, len);
    struct scaler_context ctx;
#ifdef HAVE_ADJUSTABLE_CPU_FREQ

    ctx.store_part = store_part;
#if MAX_SC_STACK_ALLOC
    ctx.buf = needed > len ? sc_buf : buf;

#if defined(CPU_SH) || defined (TEST_SH_MATH)

#if defined(HAVE_LCD_COLOR) && defined(HAVE_JPEG)
    ctx.output_row = format_index ? output_row_32_native_fromyuv
                                  : output_row_32_native;
#else
    ctx.output_row = output_row_32_native;
#endif

#ifdef HAVE_LCD_COLOR
    ctx.output_row = format->output_row_32[format_index];
#else
    ctx.output_row = format->output_row_32;
#endif

        ctx.h_scaler = scale_h_area;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
        uint32_t h_div = (1U << 24) / sw;
        ctx.h_i_val = sw * h_div;
        ctx.h_o_val = dw * h_div;
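        /* A worked example with assumed sizes (not from the original source):
           shrinking a 300-pixel-wide row to 150 output pixels gives
           h_div = (1 << 24) / 300 = 55924, h_i_val = 300 * 55924 = 16777200
           (just under 1 << 24) and h_o_val = 150 * 55924 = 8388600, so both
           widths are carried as fixed-point values sharing one scale factor
           rather than as raw pixel counts.
        */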
        ctx.h_scaler = scale_h_linear;
#if defined(CPU_SH) || defined (TEST_SH_MATH)
        uint32_t h_div = (1U << 24) / (dw - 1);
        ctx.h_i_val = (sw - 1) * h_div;
        ctx.h_o_val = (dw - 1) * h_div;

    unsigned old_macsr = coldfire_get_macsr();
    coldfire_set_macsr(EMAC_UNSIGNED);

#if defined(CPU_SH) || defined (TEST_SH_MATH)
    ctx.recip = ((uint32_t)(-div)) / div + 1;
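    /* Illustrative note (inferred from the expression; div itself is set on
       lines not shown here): (uint32_t)(-div) equals 2^32 - div, so recip is
       (2^32 - div) / div + 1, an integer approximation of 2^32 / div that lets
       the SH math replace a per-pixel divide with a multiply. For div = 100
       this gives 42949671 + 1 = 42949672, within one of 2^32 / 100.
    */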
    uint32_t v_div = (1U << 22) / sh;
    ctx.v_i_val = sh * v_div;
    ctx.v_o_val = dh * v_div;

    ret = scale_v_area(rset, &ctx);

#if defined(CPU_SH) || defined (TEST_SH_MATH)
    ctx.recip = ((uint32_t)(-div)) / div + 1;

    uint32_t v_div = (1U << 22) / dh;
    ctx.v_i_val = (sh - 1) * v_div;
    ctx.v_o_val = (dh - 1) * v_div;

    ret = scale_v_linear(rset, &ctx);

    /* Restore emac status; other modules like tone control filter
     * calculation may rely on it. */
    coldfire_set_macsr(old_macsr);

#ifdef HAVE_ADJUSTABLE_CPU_FREQ