1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2007 by Michael Sevakis
12 * ARM code for memory framebuffer LCDs
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
22 ****************************************************************************/
27 /****************************************************************************
28 * void lcd_copy_buffer_rect(fb_data *dst, fb_data *src, int width,
 *                            int height)
31 .section .icode, "ax", %progbits
33 .global lcd_copy_buffer_rect
34 .type lcd_copy_buffer_rect, %function
@ ---------------------------------------------------------------------------
@ lcd_copy_buffer_rect: copy a rectangle of 16-bit (halfword) pixels between
@ two LCD_WIDTH-pitched framebuffers.  Handles a leading halfword when the
@ destination is not word aligned, then copies in word/multi-word bursts of
@ up to 16 pixels (32 bytes) per ldmia/stmia pair, then a trailing halfword.
@ In:  r0 = dst, r1 = src, r2 = width in pixels; r3 is decremented once per
@      line ("next line"), so presumably r3 = height — TODO confirm against
@      the C prototype.
@ Out: nothing (r0/r1 advanced internally); r4-r11 saved/restored.
@ NOTE(review): this excerpt is missing lines — branches reference local
@ labels 10: (copy line), 20: (rem copy), 25: (copy up done) and
@ 40: (finish line) that do not appear in the visible text, and the
@ computed-branch table entered via "add pc, pc, r14, lsl #3" has lost the
@ store/branch lines interleaved with its ldmia entries.  Do not assemble
@ as-is; restore the dropped lines from the upstream file first.
@ ---------------------------------------------------------------------------
39 lcd_copy_buffer_rect: @
40 stmfd sp!, { r4-r11, lr } @ save non-scratch regs
41 mov r5, r2 @ r5 = cached width
42 rsb r4, r2, #LCD_WIDTH @ r4 = LCD_WIDTH - width (line advance in pixels)
44 subs r2, r5, #1 @ r2 = width - 1
45 beq 40f @ finish line @ one halfword? skip to trailing copy
46 tst r0, #2 @ word aligned?
47 beq 20f @ rem copy @ yes? skip to word copy
48 ldrh r6, [r1], #2 @ copy leading halfword
51 ble 40f @ finish line @ next line if lt or finish
52 @ trailing halfword if eq
54 add r14, r2, #1 @ get remaining width mod 16 after word
56 and r14, r14, #0xe @ r14 = 0 (16), 2, 4, 6, 8, 10, 12, 14
@ Computed jump: each table entry below is padded to a fixed size so that
@ "pc + r14*8" lands on the ldmia for the matching residue (rw % 16).
57 add pc, pc, r14, lsl #3 @ branch to 32-byte align
59 b 30f @ rw % 16 = 0 or 1? use octword loop
63 ldr r6, [r1], #4 @ rw % 16 = 2 or 3
66 b 25f @ copy up done @
67 ldmia r1!, { r6-r7 } @ rw % 16 = 4 or 5
69 stmia r0!, { r6-r7 } @
70 b 25f @ copy up done @
71 ldmia r1!, { r6-r8 } @ rw % 16 = 6 or 7
73 stmia r0!, { r6-r8 } @
74 b 25f @ copy up done @
75 ldmia r1!, { r6-r9 } @ rw % 16 = 8 or 9
77 stmia r0!, { r6-r9 } @
78 b 25f @ copy up done @
79 ldmia r1!, { r6-r10 } @ rw % 16 = 10 or 11
81 stmia r0!, { r6-r10 } @
82 b 25f @ copy up done @
83 ldmia r1!, { r6-r11 } @ rw % 16 = 12 or 13
85 stmia r0!, { r6-r11 } @
86 b 25f @ copy up done @
87 ldmia r1!, { r6-r12 } @ rw % 16 = 14 or 15
89 stmia r0!, { r6-r12 } @
91 ble 40f @ finish line @ no 32-byte segments remaining?
92 30: @ octword loop @ copy 16 pixels per loop
93 ldmia r1!, { r6-r12, r14 } @ 8 words = 32 bytes per iteration
95 stmia r0!, { r6-r12, r14 } @
96 bgt 30b @ octword loop @
98 ldreqh r6, [r1], #2 @ finish last halfword if eq ...
99 add r1, r1, r4, lsl #1 @ advance src to start of next line
100 streqh r6, [r0], #2 @ ...
101 add r0, r0, r4, lsl #1 @ advance dst to start of next line
102 subs r3, r3, #1 @ next line
103 bgt 10b @ copy line @ NOTE(review): label 10: not in this excerpt
104 ldmfd sp!, { r4-r11, pc } @ restore regs and return
105 .ltorg @ dump constant pool
106 .size lcd_copy_buffer_rect, .-lcd_copy_buffer_rect
108 /****************************************************************************
109 * void lcd_write_yuv420_lines(fb_data *dst,
110 *                             unsigned char const * const src[3],
 *                               int width, int stride)
114 * |R| |1.000000 -0.000001 1.402000| |Y'|
115 * |G| = |1.000000 -0.334136 -0.714136| |Pb|
116 * |B| |1.000000 1.772000 0.000000| |Pr|
117 * Scaled, normalized, rounded and tweaked to yield RGB 565:
118 * |R| |74 0 101| |Y' - 16| >> 9
119 * |G| = |74 -24 -51| |Cb - 128| >> 8
120 * |B| |74 128 0| |Cr - 128| >> 9
122 * Write four RGB565 pixels in the following order on each loop:
126 .section .icode, "ax", %progbits
128 .global lcd_write_yuv420_lines
129 .type lcd_write_yuv420_lines, %function
@ ---------------------------------------------------------------------------
@ lcd_write_yuv420_lines: convert one 2x2 block of planar YUV420 (Y'CbCr)
@ per loop iteration into four RGB565 pixels, using the fixed-point matrix
@ documented in the header comment (Y*74, Cr*101, Cb*128, etc.), and store
@ them into a memory framebuffer.
@ In:  r0 = dst, r1 -> src[3] = { Y'_p, Cb_p, Cr_p },
@      r2 = width (decremented by 2 per loop), r3 = Y-plane stride (used to
@      fetch the second row via "ldrb r12, [r4, r3]").
@ Out: nothing; r4-r10 saved/restored, r12/lr scratch.
@ NOTE(review): excerpt is incomplete — the loop-head label 10: targeted by
@ "bgt 10b" is missing, the #if/#elif store-addressing blocks have lost
@ their #else/#endif lines, and BOTH the usat clamp sequences and the
@ cmp/mvnhi fallback sequences appear back to back; in the full file they
@ are alternatives selected by an architecture #if (usat requires ARMv6).
@ Restore the dropped lines from the upstream file before assembling.
@ ---------------------------------------------------------------------------
130 lcd_write_yuv420_lines:
135 stmfd sp!, { r4-r10, lr } @ save non-scratch
136 ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p
137 @ r5 = yuv_src[1] = Cb_p
138 @ r6 = yuv_src[2] = Cr_p
@ --- pixel 1 of the 2x2 block: load Y'/Cb/Cr and build the chroma terms
@ (bu, rv, guv) shared by all four pixels of this block ---
142 ldrb r7, [r4], #1 @ r7 = *Y'_p++;
143 ldrb r8, [r5], #1 @ r8 = *Cb_p++;
144 ldrb r9, [r6], #1 @ r9 = *Cr_p++;
146 sub r7, r7, #16 @ r7 = Y = (Y' - 16)*74
147 add r12, r7, r7, asl #2 @ actually (Y' - 16)*37 and shift right
148 add r7, r12, r7, asl #5 @ by one less when adding - same for all
150 sub r8, r8, #128 @ Cb -= 128
151 sub r9, r9, #128 @ Cr -= 128
153 add r10, r9, r9, asl #1 @ r10 = Cr*51 + Cb*24
154 add r10, r10, r10, asl #4 @
155 add r10, r10, r8, asl #3 @
156 add r10, r10, r8, asl #4 @
158 add lr, r9, r9, asl #2 @ r9 = Cr*101
159 add lr, lr, r9, asl #5 @
160 add r9, lr, r9, asl #6 @
162 add r8, r8, #2 @ r8 = bu = (Cb*128 + 128) >> 8
164 add r9, r9, #256 @ r9 = rv = (r9 + 256) >> 9
166 rsb r10, r10, #128 @ r10 = guv = (-r10 + 128) >> 8
167 mov r10, r10, asr #8 @
168 @ compute R, G, and B
169 add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
170 add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
171 add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
@ clamp path A: ARMv6 saturate (NOTE(review): alternative to path B below)
174 usat r1, #5, r1 @ clamp b
175 usat lr, #5, lr @ clamp r
176 usat r7, #6, r7 @ clamp g
@ clamp path B: pre-v6 fallback — skip the clamp when no channel overflows
178 orr r12, r1, lr @ check if clamping is needed...
179 orr r12, r12, r7, asr #1 @ ...at all
182 cmp r1, #31 @ clamp b
183 mvnhi r1, r1, asr #31 @
185 cmp lr, #31 @ clamp r
186 mvnhi lr, lr, asr #31 @
188 cmp r7, #63 @ clamp g
189 mvnhi r7, r7, asr #31 @
194 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
196 orr r1, r1, r7, lsl #5 @ r4 |= (g << 5)
197 orr r1, r1, lr, lsl #11 @ r4 = b | (r << 11)
199 #if LCD_WIDTH >= LCD_HEIGHT
201 #elif LCD_WIDTH < 256
202 strh r1, [r0], #LCD_WIDTH @ store pixel
@ --- pixel 2: same Y math with the second-row luma fetched above ---
207 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
208 add r12, r7, r7, asl #2 @
209 add r7, r12, r7, asl #5 @
210 @ compute R, G, and B
211 add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
212 add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
213 add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
216 usat r1, #5, r1 @ clamp b
217 usat lr, #5, lr @ clamp r
218 usat r7, #6, r7 @ clamp g
220 orr r12, r1, lr @ check if clamping is needed...
221 orr r12, r12, r7, asr #1 @ ...at all
224 cmp r1, #31 @ clamp b
225 mvnhi r1, r1, asr #31 @
227 cmp lr, #31 @ clamp r
228 mvnhi lr, lr, asr #31 @
230 cmp r7, #63 @ clamp g
231 mvnhi r7, r7, asr #31 @
236 ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
238 orr r1, r1, lr, lsl #11 @ r1 = b | (r << 11)
239 orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
241 #if LCD_WIDTH >= LCD_HEIGHT
242 add r0, r0, #2*LCD_WIDTH @
243 strh r1, [r0] @ store pixel
244 sub r0, r0, #2*LCD_WIDTH @
245 #elif LCD_WIDTH < 256
246 strh r1, [r0, #-LCD_WIDTH-2] @ store pixel
249 add r0, r0, #LCD_WIDTH @
@ --- pixel 3: first-row luma of the second column ---
252 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
253 add r12, r7, r7, asl #2 @
254 add r7, r12, r7, asl #5 @
255 @ compute R, G, and B
256 add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
257 add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
258 add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
261 usat r1, #5, r1 @ clamp b
262 usat lr, #5, lr @ clamp r
263 usat r7, #6, r7 @ clamp g
265 orr r12, r1, lr @ check if clamping is needed...
266 orr r12, r12, r7, asr #1 @ ...at all
269 cmp r1, #31 @ clamp b
270 mvnhi r1, r1, asr #31 @
272 cmp lr, #31 @ clamp r
273 mvnhi lr, lr, asr #31 @
275 cmp r7, #63 @ clamp g
276 mvnhi r7, r7, asr #31 @
281 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
283 orr r1, r1, r7, lsl #5 @ r1 = b | (g << 5)
284 orr r1, r1, lr, lsl #11 @ r1 |= (r << 11)
286 #if LCD_WIDTH >= LCD_HEIGHT
288 #elif LCD_WIDTH < 256
289 strh r1, [r0, #LCD_WIDTH]! @ store pixel
@ --- pixel 4: second-row luma of the second column ---
294 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
295 add r12, r7, r7, asl #2 @
296 add r7, r12, r7, asl #5 @
297 @ compute R, G, and B
298 add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
299 add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
300 add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
303 usat r1, #5, r1 @ clamp b
304 usat lr, #5, lr @ clamp r
305 usat r7, #6, r7 @ clamp g
307 orr r12, r1, lr @ check if clamping is needed...
308 orr r12, r12, r7, asr #1 @ ...at all
311 cmp r1, #31 @ clamp b
312 mvnhi r1, r1, asr #31 @
314 cmp lr, #31 @ clamp r
315 mvnhi lr, lr, asr #31 @
317 cmp r7, #63 @ clamp g
318 mvnhi r7, r7, asr #31 @
323 orr r12, r1, lr, lsl #11 @ r12 = b | (r << 11)
324 orr r12, r12, r7, lsl #5 @ r12 |= (g << 5)
326 #if LCD_WIDTH >= LCD_HEIGHT
327 add r0, r0, #2*LCD_WIDTH
330 sub r0, r0, #(2*LCD_WIDTH)-4
332 sub r0, r0, #(2*LCD_WIDTH)
336 strh r12, [r0, #-2] @ store pixel
338 add r0, r0, #2*LCD_WIDTH @
340 add r0, r0, #LCD_WIDTH @
344 subs r2, r2, #2 @ subtract block from width
345 bgt 10b @ loop line @ NOTE(review): label 10: not in this excerpt
347 ldmfd sp!, { r4-r10, pc } @ restore registers and return
348 .ltorg @ dump constant pool
349 .size lcd_write_yuv420_lines, .-lcd_write_yuv420_lines
352 /****************************************************************************
353 * void lcd_write_yuv420_lines_odither(fb_data *dst,
354 *                                     unsigned char const * const src[3],
 *                                       int width, int stride,
 *                                       int x_screen, int y_screen)
360 * |R| |1.000000 -0.000001 1.402000| |Y'|
361 * |G| = |1.000000 -0.334136 -0.714136| |Pb|
362 * |B| |1.000000 1.772000 0.000000| |Pr|
363 * Red scaled at twice g & b but at same precision to place it in correct
364 * bit position after multiply and leave instruction count lower.
365 * |R| |258 0 408| |Y' - 16|
366 * |G| = |149 -49 -104| |Cb - 128|
367 * |B| |149 258 0| |Cr - 128|
369 * Write four RGB565 pixels in the following order on each loop:
373 * Kernel pattern (raw|rotated|use order):
374 * 5 3 4 2 2 6 3 7 row0 row2 > down
375 * 1 7 0 6 | 4 0 5 1 | 2 4 6 0 3 5 7 1 col0 left
376 * 4 2 5 3 | 3 7 2 6 | 3 5 7 1 2 4 6 0 col2 \/
379 .section .icode, "ax", %progbits
381 .global lcd_write_yuv420_lines_odither
382 .type lcd_write_yuv420_lines_odither, %function
@ ---------------------------------------------------------------------------
@ lcd_write_yuv420_lines_odither: like lcd_write_yuv420_lines, but converts
@ each 2x2 YUV420 block to RGB565 with an ordered-dither kernel applied
@ before truncation (matrix coefficients Y*149, Cr*408, Cb*258 per the
@ header comment; red is scaled double so it lands in its final bit slot).
@ In:  r0 = dst, r1 -> src[3] = { Y'_p, Cb_p, Cr_p },
@      r2 = width (decremented by 2 per loop), r3 = Y-plane stride.
@      Two further arguments are read from the stack ("add r1, sp, #40"
@      after pushing 9 registers = 36 bytes) to line up the dither pattern
@      and kernel quadrant — presumably screen x/y coordinates; TODO
@      confirm against the C prototype.
@ Out: nothing; r4-r11 saved/restored, r12/lr scratch (r14 holds the
@      dither quadrant offset, flipped per block via "eor r14, r14, #0x80").
@ NOTE(review): excerpt is incomplete — the loop-head label 10: targeted by
@ "bgt 10b" is missing, the #if/#elif store blocks have lost #else/#endif
@ lines, and both the usat clamp sequences and the movs/mvnne fallback
@ sequences appear back to back; in the full file they are alternatives
@ selected by an architecture #if.  Restore from the upstream file before
@ assembling.
@ ---------------------------------------------------------------------------
383 lcd_write_yuv420_lines_odither:
390 stmfd sp!, { r4-r11, lr } @ save non-scratch
391 ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p
392 @ r5 = yuv_src[1] = Cb_p
393 @ r6 = yuv_src[2] = Cr_p
396 add r1, sp, #40 @ Line up pattern and kernel quadrant
397 ldmia r1, { r12, r14 } @ fetch the two stack arguments
400 mov r14, r14, lsl #6 @ 0x00 or 0x80
@ --- pixel 1: load Y'/Cb/Cr, build chroma terms shared by the block ---
403 ldrb r7, [r4], #1 @ r7 = *Y'_p++;
404 ldrb r8, [r5], #1 @ r8 = *Cb_p++;
405 ldrb r9, [r6], #1 @ r9 = *Cr_p++;
407 eor r14, r14, #0x80 @ flip pattern quadrant
409 sub r7, r7, #16 @ r7 = Y = (Y' - 16)*149
410 add r12, r7, r7, asl #2 @
411 add r12, r12, r12, asl #4 @
412 add r7, r12, r7, asl #6 @
414 sub r8, r8, #128 @ Cb -= 128
415 sub r9, r9, #128 @ Cr -= 128
417 add r10, r8, r8, asl #4 @ r10 = guv = Cr*104 + Cb*49
418 add r10, r10, r8, asl #5 @
419 add r10, r10, r9, asl #3 @
420 add r10, r10, r9, asl #5 @
421 add r10, r10, r9, asl #6 @
423 mov r8, r8, asl #1 @ r8 = bu = Cb*258
424 add r8, r8, r8, asl #7 @
426 add r9, r9, r9, asl #1 @ r9 = rv = Cr*408
427 add r9, r9, r9, asl #4 @
430 @ compute R, G, and B
431 add r1, r8, r7 @ r1 = b' = Y + bu
432 add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
433 rsb r7, r10, r7 @ r7 = g' = Y + guv
435 @ r8 = bu, r9 = rv, r10 = guv
@ scale to leave room for the dither delta before truncating to 5/6/5 bits
437 sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256
438 add r1, r12, r1, lsr #8 @
440 sub r12, r11, r11, lsr #5 @ r11 = 31/32*r + r/256
441 add r11, r12, r11, lsr #8 @
443 sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256
444 add r7, r12, r7, lsr #8 @
446 add r12, r14, #0x100 @ r12 = delta = quadrant + kernel element
448 add r1, r1, r12 @ b = r1 + delta
449 add r11, r11, r12, lsl #1 @ r = r11 + delta*2
450 add r7, r7, r12, lsr #1 @ g = r7 + delta/2
@ clamp path A: ARMv6 saturate-with-shift (alternative to path B below)
453 usat r11, #5, r11, asr #11 @ clamp r
454 usat r7, #6, r7, asr #9 @ clamp g
455 usat r1, #5, r1, asr #10 @ clamp b
457 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
459 orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
460 orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
@ clamp path B: pre-v6 fallback — detect overflow, then mask per channel
462 orr r12, r1, r11, asr #1 @ check if clamping is needed...
463 orr r12, r12, r7 @ ...at all
464 movs r12, r12, asr #15 @
466 movs r12, r1, asr #15 @ clamp b
467 mvnne r1, r12, lsr #15 @
468 andne r1, r1, #0x7c00 @ mask b only if clamped
469 movs r12, r11, asr #16 @ clamp r
470 mvnne r11, r12, lsr #16 @
471 movs r12, r7, asr #15 @ clamp g
472 mvnne r7, r12, lsr #15 @
475 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
477 and r11, r11, #0xf800 @ pack pixel
478 and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
479 orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
480 orr r1, r11, r1, lsr #10 @ (b >> 10)
483 #if LCD_WIDTH >= LCD_HEIGHT
485 #elif LCD_WIDTH < 256
486 strh r1, [r0], #LCD_WIDTH @ store pixel
@ --- pixel 2: second-row luma, kernel element 0x200 ---
491 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
492 add r12, r7, r7, asl #2 @
493 add r12, r12, r12, asl #4 @
494 add r7, r12, r7, asl #6 @
495 @ compute R, G, and B
496 add r1, r8, r7 @ r1 = b' = Y + bu
497 add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
498 rsb r7, r10, r7 @ r7 = g' = Y + guv
500 sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256
501 add r1, r12, r1, lsr #8 @
503 sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256
504 add r11, r12, r11, lsr #8 @
506 sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256
507 add r7, r12, r7, lsr #8 @
509 add r12, r14, #0x200 @ r12 = delta for this kernel position
511 add r1, r1, r12 @ b = r1 + delta
512 add r11, r11, r12, lsl #1 @ r = r11 + delta*2
513 add r7, r7, r12, lsr #1 @ g = r7 + delta/2
516 usat r11, #5, r11, asr #11 @ clamp r
517 usat r7, #6, r7, asr #9 @ clamp g
518 usat r1, #5, r1, asr #10 @ clamp b
520 ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
522 orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
523 orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
525 orr r12, r1, r11, asr #1 @ check if clamping is needed...
526 orr r12, r12, r7 @ ...at all
527 movs r12, r12, asr #15 @
529 movs r12, r1, asr #15 @ clamp b
530 mvnne r1, r12, lsr #15 @
531 andne r1, r1, #0x7c00 @ mask b only if clamped
532 movs r12, r11, asr #16 @ clamp r
533 mvnne r11, r12, lsr #16 @
534 movs r12, r7, asr #15 @ clamp g
535 mvnne r7, r12, lsr #15 @
538 ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
540 and r11, r11, #0xf800 @ pack pixel
541 and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
542 orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
543 orr r1, r11, r1, lsr #10 @ (b >> 10)
546 #if LCD_WIDTH >= LCD_HEIGHT
547 add r0, r0, #2*LCD_WIDTH @
548 strh r1, [r0] @ store pixel
549 sub r0, r0, #2*LCD_WIDTH @
550 #elif LCD_WIDTH < 256
551 strh r1, [r0, #-LCD_WIDTH-2] @ store pixel
553 strh r1, [r0, #-2] @ store pixel
554 add r0, r0, #LCD_WIDTH @
@ --- pixel 3: first-row luma of second column, kernel element 0x300 ---
557 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
558 add r12, r7, r7, asl #2 @
559 add r12, r12, r12, asl #4 @
560 add r7, r12, r7, asl #6 @
561 @ compute R, G, and B
562 add r1, r8, r7 @ r1 = b' = Y + bu
563 add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
564 rsb r7, r10, r7 @ r7 = g' = Y + guv
566 @ r8 = bu, r9 = rv, r10 = guv
568 sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256
569 add r1, r12, r1, lsr #8 @
571 sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256
572 add r11, r12, r11, lsr #8 @
574 sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256
575 add r7, r12, r7, lsr #8 @
577 add r12, r14, #0x300 @ r12 = delta for this kernel position
579 add r1, r1, r12 @ b = r1 + delta
580 add r11, r11, r12, lsl #1 @ r = r11 + delta*2
581 add r7, r7, r12, lsr #1 @ g = r7 + delta/2
584 usat r11, #5, r11, asr #11 @ clamp r
585 usat r7, #6, r7, asr #9 @ clamp g
586 usat r1, #5, r1, asr #10 @ clamp b
588 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
590 orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
591 orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
593 orr r12, r1, r11, asr #1 @ check if clamping is needed...
594 orr r12, r12, r7 @ ...at all
595 movs r12, r12, asr #15 @
597 movs r12, r1, asr #15 @ clamp b
598 mvnne r1, r12, lsr #15 @
599 andne r1, r1, #0x7c00 @ mask b only if clamped
600 movs r12, r11, asr #16 @ clamp r
601 mvnne r11, r12, lsr #16 @
602 movs r12, r7, asr #15 @ clamp g
603 mvnne r7, r12, lsr #15 @
606 ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
608 and r11, r11, #0xf800 @ pack pixel
609 and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
610 orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
611 orr r1, r11, r1, lsr #10 @ (b >> 10)
614 #if LCD_WIDTH >= LCD_HEIGHT
616 #elif LCD_WIDTH < 256
617 strh r1, [r0, #LCD_WIDTH]! @ store pixel
@ --- pixel 4: second-row luma of second column, kernel element 0 ---
622 sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
623 add r12, r7, r7, asl #2 @
624 add r12, r12, r12, asl #4 @
625 add r7, r12, r7, asl #6 @
626 @ compute R, G, and B
627 add r1, r8, r7 @ r1 = b' = Y + bu
628 add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
629 rsb r7, r10, r7 @ r7 = g' = Y + guv
631 sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256
632 add r1, r12, r1, lsr #8 @
634 sub r12, r11, r11, lsr #5 @ r11 = 31/32*r + r/256
635 add r11, r12, r11, lsr #8 @
637 sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256
638 add r7, r12, r7, lsr #8 @
640 @ This element is zero - use r14 @
642 add r1, r1, r14 @ b = r1 + delta
643 add r11, r11, r14, lsl #1 @ r = r11 + delta*2
644 add r7, r7, r14, lsr #1 @ g = r7 + delta/2
647 usat r11, #5, r11, asr #11 @ clamp r
648 usat r7, #6, r7, asr #9 @ clamp g
649 usat r1, #5, r1, asr #10 @ clamp b
651 orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
652 orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
654 orr r12, r1, r11, asr #1 @ check if clamping is needed...
655 orr r12, r12, r7 @ ...at all
656 movs r12, r12, asr #15 @
658 movs r12, r1, asr #15 @ clamp b
659 mvnne r1, r12, lsr #15 @
660 andne r1, r1, #0x7c00 @ mask b only if clamped
661 movs r12, r11, asr #16 @ clamp r
662 mvnne r11, r12, lsr #16 @
663 movs r12, r7, asr #15 @ clamp g
664 mvnne r7, r12, lsr #15 @
667 and r11, r11, #0xf800 @ pack pixel
668 and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
669 orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
670 orr r1, r11, r1, lsr #10 @ (b >> 10)
673 #if LCD_WIDTH >= LCD_HEIGHT
674 add r0, r0, #2*LCD_WIDTH
675 strh r1, [r0, #2] @ store pixel
677 sub r0, r0, #(2*LCD_WIDTH)-4
679 sub r0, r0, #(2*LCD_WIDTH)
683 strh r1, [r0, #-2] @ store pixel
685 add r0, r0, #2*LCD_WIDTH @
687 add r0, r0, #LCD_WIDTH @
691 subs r2, r2, #2 @ subtract block from width
692 bgt 10b @ loop line @ NOTE(review): label 10: not in this excerpt
694 ldmfd sp!, { r4-r11, pc } @ restore registers and return
695 .ltorg @ dump constant pool
696 .size lcd_write_yuv420_lines_odither, .-lcd_write_yuv420_lines_odither