/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Jens Arnold
 * Heavily based on lcd-as-memframe.c by Michael Sevakis
 * Adapted for Sansa Fuze/e200v2 by Rafaël Carré
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include "cpu.h"

#define DBOP_BUSY (1<<10)

/****************************************************************************
 * void lcd_write_yuv420_lines(unsigned char const * const src[3],
 *                             int width,
 *                             int stride);
 *
 *   |R|   |1.000000 -0.000001  1.402000| |Y'|
 *   |G| = |1.000000 -0.334136 -0.714136| |Pb|
 *   |B|   |1.000000  1.772000  0.000000| |Pr|
 *   Scaled, normalized, rounded and tweaked to yield RGB 565:
 *   |R|   |74   0 101| |Y' -  16| >> 9
 *   |G| = |74 -24 -51| |Cb - 128| >> 8
 *   |B|   |74 128   0| |Cr - 128| >> 9
 *
 * Write four RGB565 pixels in the following order on each loop:
 * 1 3 + > down
 * 2 4 \/ left
 */
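/*
 * Illustrative sketch only (not assembled; names are invented): roughly the C
 * equivalent of one pixel of the fixed-point conversion implemented below,
 * with the rounding terms omitted:
 *
 *   int y   = (*Y_p  -  16) * 74;
 *   int bu  = (*Cb_p - 128) * 128;
 *   int guv = (*Cb_p - 128) * 24 + (*Cr_p - 128) * 51;
 *   int rv  = (*Cr_p - 128) * 101;
 *   int r   = clamp((y + rv ) >> 9, 0, 31);   // clamp() is hypothetical
 *   int g   = clamp((y - guv) >> 8, 0, 63);
 *   int b   = clamp((y + bu ) >> 9, 0, 31);
 *   unsigned pixel = (r << 11) | (g << 5) | b;   // RGB565
 */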
    .section    .icode, "ax", %progbits
    .align      2
    .global     lcd_write_yuv420_lines
    .type       lcd_write_yuv420_lines, %function
lcd_write_yuv420_lines:
                                        @ r0 = yuv_src
                                        @ r1 = width
                                        @ r2 = stride
    stmfd       sp!, { r4-r11, lr }     @ save non-scratch

    mov         r3, #0xC8000000         @
    orr         r3, r3, #0x120000       @ r3 = DBOP_BASE

    ldmia       r0, { r4, r5, r6 }      @ r4 = yuv_src[0] = Y'_p
                                        @ r5 = yuv_src[1] = Cb_p
                                        @ r6 = yuv_src[2] = Cr_p
                                        @ r0 = scratch
    ldr         r12, [r3, #8]           @
    sub         r2, r2, #1              @ stride -= 1
    orr         r12, r12, #3<<13        @ DBOP_CTRL |= (1<<13|1<<14) (32bit mode)
#ifdef SANSA_FUZEV2
    bic         r12, r12, #1<<13        @ DBOP_CTRL &= ~(1<<13), still 32bit mode
#endif
    str         r12, [r3, #8]           @
10: @ loop line                         @
    ldrb        r7, [r4], #1            @ r7 = *Y'_p++;
    ldrb        r8, [r5], #1            @ r8 = *Cb_p++;
    ldrb        r9, [r6], #1            @ r9 = *Cr_p++;
                                        @
    sub         r7, r7, #16             @ r7 = Y = (Y' - 16)*74
    add         r12, r7, r7, asl #2     @ actually (Y' - 16)*37 and shift right
    add         r7, r12, r7, asl #5     @ by one less when adding - same for all
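                                        @ note: 74 = 2*37 and 37 = 32 + 4 + 1,
                                        @ so the two adds above compute
                                        @ (Y' - 16)*37; the missing factor of 2
                                        @ is absorbed by shifting one bit less
                                        @ when R, G and B are formed below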
                                        @
    sub         r8, r8, #128            @ Cb -= 128
    sub         r9, r9, #128            @ Cr -= 128
                                        @
    add         r10, r9, r9, asl #1     @ r10 = Cr*51 + Cb*24
    add         r10, r10, r10, asl #4   @
    add         r10, r10, r8, asl #3    @
    add         r10, r10, r8, asl #4    @
                                        @
    add         lr, r9, r9, asl #2      @ r9 = Cr*101
    add         lr, lr, r9, asl #5      @
    add         r9, lr, r9, asl #6      @
                                        @
    add         r8, r8, #2              @ r8 = bu = (Cb*128 + 256) >> 9
    mov         r8, r8, asr #2          @
    add         r9, r9, #256            @ r9 = rv = (Cr*101 + 256) >> 9
    mov         r9, r9, asr #9          @
    rsb         r10, r10, #128          @ r10 = guv = (-r10 + 128) >> 8
    mov         r10, r10, asr #8        @
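                                        @ the +2, +256 and +128 above are
                                        @ rounding terms added before the final
                                        @ shift; e.g. (Cb + 2) >> 2 equals
                                        @ (Cb*128 + 256) >> 9 at lower precision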
                                        @ compute R, G, and B
    add         r0, r8, r7, asr #8      @ r0  = b = (Y >> 9) + bu
    add         lr, r9, r7, asr #8      @ lr  = r = (Y >> 9) + rv
    add         r7, r10, r7, asr #7     @ r7  = g = (Y >> 8) + guv
                                        @
    orr         r12, r0, lr             @ check if clamping is needed...
    orr         r12, r12, r7, asr #1    @ ...at all
    cmp         r12, #31                @
    bls         15f @ no clamp          @
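                                        @ clamp trick: for an out-of-range x,
                                        @ "mvn x, x, asr #31" gives 0 if x was
                                        @ negative and all ones if it overflowed
                                        @ upwards, so anding with the channel
                                        @ maximum (31 or 63) saturates x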
    cmp         r0, #31                 @ clamp b
    mvnhi       r0, r0, asr #31         @
    andhi       r0, r0, #31             @
    cmp         lr, #31                 @ clamp r
    mvnhi       lr, lr, asr #31         @
    andhi       lr, lr, #31             @
    cmp         r7, #63                 @ clamp g
    mvnhi       r7, r7, asr #31         @
    andhi       r7, r7, #63             @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4, r2]           @ r12 = Y' = *(Y'_p + stride)
                                        @
    orr         r0, r0, lr, lsl #11     @ r0  = (r << 11) | b
    orr         r11, r0, r7, lsl #5     @ r11 = (r << 11) | (g << 5) | b
#ifdef SANSA_FUZEV2
    mov         r0, r11, lsr #8         @
    bic         r11, r11, #0xff00       @
    orr         r11, r0, r11, lsl #8    @ swap bytes
#endif
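                                        @ the three instructions above swap the
                                        @ two bytes of the 16-bit pixel, i.e.
                                        @ r11 = (r11 >> 8) | ((r11 & 0xff) << 8);
                                        @ presumably the FuzeV2 DBOP wiring
                                        @ expects the opposite byte order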
    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*74
    add         r12, r7, r7, asl #2     @
    add         r7, r12, r7, asl #5     @
                                        @ compute R, G, and B
    add         r0, r8, r7, asr #8      @ r0  = b = (Y >> 9) + bu
    add         lr, r9, r7, asr #8      @ lr  = r = (Y >> 9) + rv
    add         r7, r10, r7, asr #7     @ r7  = g = (Y >> 8) + guv
                                        @
    orr         r12, r0, lr             @ check if clamping is needed...
    orr         r12, r12, r7, asr #1    @ ...at all
    cmp         r12, #31                @
    bls         15f @ no clamp          @
    cmp         r0, #31                 @ clamp b
    mvnhi       r0, r0, asr #31         @
    andhi       r0, r0, #31             @
    cmp         lr, #31                 @ clamp r
    mvnhi       lr, lr, asr #31         @
    andhi       lr, lr, #31             @
    cmp         r7, #63                 @ clamp g
    mvnhi       r7, r7, asr #31         @
    andhi       r7, r7, #63             @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4], #1           @ r12 = Y' = *(Y'_p++)
                                        @
    orr         r0, r0, lr, lsl #11     @ r0 = (r << 11) | b
    orr         r0, r0, r7, lsl #5      @ r0 = (r << 11) | (g << 5) | b

#ifdef SANSA_FUZEV2
    mov         r7, r0, lsr #8          @
    bic         r7, r7, #0xff00         @
    orr         r0, r7, r0, lsl #8      @ swap bytes
#endif

    orr         r0, r11, r0, lsl #16    @ pack with 2nd pixel
    str         r0, [r3, #0x10]         @ write pixel
                                        @
    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*74
    add         r12, r7, r7, asl #2     @
    add         r7, r12, r7, asl #5     @
                                        @ compute R, G, and B
    add         r0, r8, r7, asr #8      @ r0  = b = (Y >> 9) + bu
    add         lr, r9, r7, asr #8      @ lr  = r = (Y >> 9) + rv
    add         r7, r10, r7, asr #7     @ r7  = g = (Y >> 8) + guv
                                        @
    orr         r12, r0, lr             @ check if clamping is needed...
    orr         r12, r12, r7, asr #1    @ ...at all
    cmp         r12, #31                @
    bls         15f @ no clamp          @
    cmp         r0, #31                 @ clamp b
    mvnhi       r0, r0, asr #31         @
    andhi       r0, r0, #31             @
    cmp         lr, #31                 @ clamp r
    mvnhi       lr, lr, asr #31         @
    andhi       lr, lr, #31             @
    cmp         r7, #63                 @ clamp g
    mvnhi       r7, r7, asr #31         @
    andhi       r7, r7, #63             @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4, r2]           @ r12 = Y' = *(Y'_p + stride)
                                        @
                                        @
    orr         r0, r0, lr, lsl #11     @ r0  = (r << 11) | b
    orr         r11, r0, r7, lsl #5     @ r11 = (r << 11) | (g << 5) | b

#ifdef SANSA_FUZEV2
    mov         r0, r11, lsr #8         @
    bic         r11, r11, #0xff00       @
    orr         r11, r0, r11, lsl #8    @ swap bytes
#endif

    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*74
    add         r12, r7, r7, asl #2     @
    add         r7, r12, r7, asl #5     @
                                        @ compute R, G, and B
    add         r0, r8, r7, asr #8      @ r0  = b = (Y >> 9) + bu
    add         lr, r9, r7, asr #8      @ lr  = r = (Y >> 9) + rv
    add         r7, r10, r7, asr #7     @ r7  = g = (Y >> 8) + guv
                                        @
    orr         r12, r0, lr             @ check if clamping is needed...
    orr         r12, r12, r7, asr #1    @ ...at all
    cmp         r12, #31                @
    bls         15f @ no clamp          @
    cmp         r0, #31                 @ clamp b
    mvnhi       r0, r0, asr #31         @
    andhi       r0, r0, #31             @
    cmp         lr, #31                 @ clamp r
    mvnhi       lr, lr, asr #31         @
    andhi       lr, lr, #31             @
    cmp         r7, #63                 @ clamp g
    mvnhi       r7, r7, asr #31         @
    andhi       r7, r7, #63             @
15: @ no clamp                          @
                                        @
    orr         r0, r0, lr, lsl #11     @ r0 = (r << 11) | b
    orr         r0, r0, r7, lsl #5      @ r0 = (r << 11) | (g << 5) | b

#ifdef SANSA_FUZEV2
    mov         r7, r0, lsr #8          @
    bic         r7, r7, #0xff00         @
    orr         r0, r7, r0, lsl #8      @ swap bytes
#endif

    orr         r0, r11, r0, lsl #16    @ pack with 2nd pixel
    str         r0, [r3, #0x10]         @ write pixel
                                        @
    subs        r1, r1, #2              @ subtract block from width
    bgt         10b @ loop line         @
                                        @
1: @ busy
    @ we write at most 110 32-bit words per line (LCD_WIDTH/2) and the fifo is
    @ bigger, so polling for an empty fifo only after each line is safe
    ldr         r7, [r3,#0xc]           @ r7 = DBOP_STATUS
    tst         r7, #DBOP_BUSY          @ fifo not empty?
    beq         1b                      @

    ldmpc       regs=r4-r11             @ restore registers and return
    .ltorg                              @ dump constant pool
    .size   lcd_write_yuv420_lines, .-lcd_write_yuv420_lines
/****************************************************************************
 * void lcd_write_yuv420_lines_odither(unsigned char const * const src[3],
 *                                     int width,
 *                                     int stride,
 *                                     int x_screen,
 *                                     int y_screen);
 *
 *   |R|   |1.000000 -0.000001  1.402000| |Y'|
 *   |G| = |1.000000 -0.334136 -0.714136| |Pb|
 *   |B|   |1.000000  1.772000  0.000000| |Pr|
 *   Red is scaled at twice the g & b weight but at the same precision, so it
 *   lands in the correct bit position after the multiply with fewer instructions.
 *   |R|   |298   0  408| |Y' -  16|
 *   |G| = |149 -49 -104| |Cb - 128|
 *   |B|   |149 258    0| |Cr - 128|
 *
 * Write four RGB565 pixels in the following order on each loop:
 * 1 3 + > down
 * 2 4 \/ left
 *
 * Kernel pattern (raw|rotated|use order):
 * 5 3 4 2   2 6 3 7     row0   row2          > down
 * 1 7 0 6 | 4 0 5 1 | 2 4 6 0 3 5 7 1 col0     left
 * 4 2 5 3 | 3 7 2 6 | 3 5 7 1 2 4 6 0 col2  \/
 * 0 6 1 7   5 1 4 0
 */
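/*
 * Illustrative sketch only (not assembled; names are invented): r14 below
 * holds a kernel quadrant offset derived from bit 1 of x_screen ^ y_screen
 * (0x00 or 0x80), flipped on every loop iteration (every two output columns).
 * Each of the four pixels of the 2x2 block then adds its own delta before the
 * value is truncated to RGB565:
 *
 *   delta = quadrant + k;   // k = 0x100, 0x200, 0x300, 0 in code order
 *   b'   += delta;          // blue: 5 bits taken from bit 10 upwards
 *   r'   += delta * 2;      // red is scaled twice as high, so twice the delta
 *   g'   += delta / 2;      // green keeps 6 bits, so half the delta
 */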
    .section    .icode, "ax", %progbits
    .align      2
    .global     lcd_write_yuv420_lines_odither
    .type       lcd_write_yuv420_lines_odither, %function
lcd_write_yuv420_lines_odither:
                                        @ r0 = yuv_src
                                        @ r1 = width
                                        @ r2 = stride
                                        @ r3 = x_screen
                                        @ [sp] = y_screen
    stmfd       sp!, { r4-r11, lr }     @ save non-scratch
    ldmia       r0, { r4, r5, r6 }      @ r4 = yuv_src[0] = Y'_p
                                        @ r5 = yuv_src[1] = Cb_p
                                        @ r6 = yuv_src[2] = Cr_p
                                        @
    ldr         r14, [sp, #36]          @ Line up pattern and kernel quadrant
    sub         r2, r2, #1              @ stride -= 1
    eor         r14, r14, r3            @
    and         r14, r14, #0x2          @
    mov         r14, r14, lsl #6        @ 0x00 or 0x80

    mov         r3, #0xC8000000         @
    orr         r3, r3, #0x120000       @ r3 = DBOP_BASE, needs to be
                                        @ recomputed later due to lack of registers
    ldr         r12, [r3, #8]           @
    orr         r12, r12, #3<<13        @ DBOP_CTRL |= (1<<13|1<<14)
#ifdef SANSA_FUZEV2
    bic         r12, r12, #1<<13        @ DBOP_CTRL &= ~(1<<13), still 32bit mode
#endif
    str         r12, [r3, #8]           @ (32bit mode)
10: @ loop line                         @
                                        @
    ldrb        r7, [r4], #1            @ r7 = *Y'_p++;
    ldrb        r8, [r5], #1            @ r8 = *Cb_p++;
    ldrb        r9, [r6], #1            @ r9 = *Cr_p++;
                                        @
    eor         r14, r14, #0x80         @ flip pattern quadrant
                                        @
    sub         r7, r7, #16             @ r7 = Y = (Y' - 16)*149
    add         r12, r7, r7, asl #2     @
    add         r12, r12, r12, asl #4   @
    add         r7, r12, r7, asl #6     @
                                        @
    sub         r8, r8, #128            @ Cb -= 128
    sub         r9, r9, #128            @ Cr -= 128
                                        @
    add         r10, r8, r8, asl #4     @ r10 = guv = Cr*104 + Cb*49
    add         r10, r10, r8, asl #5    @
    add         r10, r10, r9, asl #3    @
    add         r10, r10, r9, asl #5    @
    add         r10, r10, r9, asl #6    @
                                        @
    mov         r8, r8, asl #1          @ r8 = bu = Cb*258
    add         r8, r8, r8, asl #7      @
                                        @
    add         r9, r9, r9, asl #1      @ r9 = rv = Cr*408
    add         r9, r9, r9, asl #4      @
    mov         r9, r9, asl #3          @
                                        @
                                        @ compute R, G, and B
    add         r0, r8, r7              @ r0  = b' = Y + bu
    add         r11, r9, r7, asl #1     @ r11 = r' = Y*2 + rv
    rsb         r7, r10, r7             @ r7  = g' = Y - guv
                                        @
                                        @ r8 = bu, r9 = rv, r10 = guv
                                        @
    sub         r12, r0, r0, lsr #5     @ r0 = 31/32*b + b/256
    add         r0, r12, r0, lsr #8     @
                                        @
    sub         r12, r11, r11, lsr #5   @ r11 = 31/32*r + r/256
    add         r11, r12, r11, lsr #8   @
                                        @
    sub         r12, r7, r7, lsr #6     @ r7 = 63/64*g + g/256
    add         r7, r12, r7, lsr #8     @
                                        @
    add         r12, r14, #0x100        @
                                        @
    add         r0, r0, r12             @ b = r0 + delta
    add         r11, r11, r12, lsl #1   @ r = r11 + delta*2
    add         r7, r7, r12, lsr #1     @ g = r7 + delta/2
                                        @
    orr         r12, r0, r11, asr #1    @ check if clamping is needed...
    orr         r12, r12, r7            @ ...at all
    movs        r12, r12, asr #15       @
    beq         15f @ no clamp          @
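                                        @ clamp trick: x asr #15 (asr #16 for r)
                                        @ is zero while x is in range; otherwise
                                        @ mvn of (x asr #n) lsr #n leaves the
                                        @ packed field bits all clear when x was
                                        @ negative and all set on overflow, so
                                        @ the 0xf800/0x7e00/0x7c00 masks
                                        @ saturate each channel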
    movs        r12, r0, asr #15        @ clamp b
    mvnne       r0, r12, lsr #15        @
    andne       r0, r0, #0x7c00         @ mask b only if clamped
    movs        r12, r11, asr #16       @ clamp r
    mvnne       r11, r12, lsr #16       @
    movs        r12, r7, asr #15        @ clamp g
    mvnne       r7, r12, lsr #15        @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4, r2]           @ r12 = Y' = *(Y'_p + stride)
                                        @
    and         r11, r11, #0xf800       @ pack pixel
    and         r7, r7, #0x7e00         @ r3 = pixel = (r & 0xf800) |
    orr         r11, r11, r7, lsr #4    @              ((g & 0x7e00) >> 4) |
    orr         r3, r11, r0, lsr #10    @              (b >> 10)
#ifdef SANSA_FUZEV2
    mov         r7, r3, lsr #8          @
    bic         r3, r3, #0xff00         @
    orr         r3, r7, r3, lsl #8      @ swap pixel
#endif
                                        @ save pixel
    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*149
    add         r12, r7, r7, asl #2     @
    add         r12, r12, r12, asl #4   @
    add         r7, r12, r7, asl #6     @
                                        @ compute R, G, and B
    add         r0, r8, r7              @ r0  = b' = Y + bu
    add         r11, r9, r7, asl #1     @ r11 = r' = Y*2 + rv
    rsb         r7, r10, r7             @ r7  = g' = Y - guv
                                        @
    sub         r12, r0, r0, lsr #5     @ r0  = 31/32*b' + b'/256
    add         r0, r12, r0, lsr #8     @
                                        @
    sub         r12, r11, r11, lsr #5   @ r11 = 31/32*r' + r'/256
    add         r11, r12, r11, lsr #8   @
                                        @
    sub         r12, r7, r7, lsr #6     @ r7  = 63/64*g' + g'/256
    add         r7, r12, r7, lsr #8     @
                                        @
    add         r12, r14, #0x200        @
                                        @
    add         r0, r0, r12             @ b = r0 + delta
    add         r11, r11, r12, lsl #1   @ r = r11 + delta*2
    add         r7, r7, r12, lsr #1     @ g = r7 + delta/2
                                        @
    orr         r12, r0, r11, asr #1    @ check if clamping is needed...
    orr         r12, r12, r7            @ ...at all
    movs        r12, r12, asr #15       @
    beq         15f @ no clamp          @
    movs        r12, r0, asr #15        @ clamp b
    mvnne       r0, r12, lsr #15        @
    andne       r0, r0, #0x7c00         @ mask b only if clamped
    movs        r12, r11, asr #16       @ clamp r
    mvnne       r11, r12, lsr #16       @
    movs        r12, r7, asr #15        @ clamp g
    mvnne       r7, r12, lsr #15        @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4], #1           @ r12 = Y' = *(Y'_p++)

    and         r11, r11, #0xf800       @ pack pixel
    and         r7, r7, #0x7e00         @ r0 = pixel = (r & 0xf800) |
    orr         r11, r11, r7, lsr #4    @              ((g & 0x7e00) >> 4) |
    orr         r0, r11, r0, lsr #10    @              (b >> 10)
#ifdef SANSA_FUZEV2
    mov         r7, r0, lsr #8          @
    bic         r0, r0, #0xff00         @
    orr         r0, r7, r0, lsl #8      @ swap pixel
#endif
    orr         r3, r3, r0, lsl #16     @ pack with 2nd pixel
    mov         r0, #0xC8000000         @
    orr         r0, r0, #0x120000       @ r0 = DBOP_BASE

    str         r3, [r0, #0x10]         @ write pixel
                                        @
    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*149
    add         r12, r7, r7, asl #2     @
    add         r12, r12, r12, asl #4   @
    add         r7, r12, r7, asl #6     @
                                        @ compute R, G, and B
    add         r0, r8, r7              @ r0  = b' = Y + bu
    add         r11, r9, r7, asl #1     @ r11 = r' = Y*2 + rv
    rsb         r7, r10, r7             @ r7  = g' = Y - guv
                                        @
                                        @ r8 = bu, r9 = rv, r10 = guv
                                        @
    sub         r12, r0, r0, lsr #5     @ r0  = 31/32*b' + b'/256
    add         r0, r12, r0, lsr #8     @
                                        @
    sub         r12, r11, r11, lsr #5   @ r11 = 31/32*r' + r'/256
    add         r11, r12, r11, lsr #8   @
                                        @
    sub         r12, r7, r7, lsr #6     @ r7  = 63/64*g' + g'/256
    add         r7, r12, r7, lsr #8     @
                                        @
    add         r12, r14, #0x300        @
                                        @
    add         r0, r0, r12             @ b = r0 + delta
    add         r11, r11, r12, lsl #1   @ r = r11 + delta*2
    add         r7, r7, r12, lsr #1     @ g = r7 + delta/2
                                        @
    orr         r12, r0, r11, asr #1    @ check if clamping is needed...
    orr         r12, r12, r7            @ ...at all
    movs        r12, r12, asr #15       @
    beq         15f @ no clamp          @
    movs        r12, r0, asr #15        @ clamp b
    mvnne       r0, r12, lsr #15        @
    andne       r0, r0, #0x7c00         @ mask b only if clamped
    movs        r12, r11, asr #16       @ clamp r
    mvnne       r11, r12, lsr #16       @
    movs        r12, r7, asr #15        @ clamp g
    mvnne       r7, r12, lsr #15        @
15: @ no clamp                          @
                                        @
    ldrb        r12, [r4, r2]           @ r12 = Y' = *(Y'_p + stride)
                                        @
    and         r11, r11, #0xf800       @ pack pixel
    and         r7, r7, #0x7e00         @ r3 = pixel = (r & 0xf800) |
    orr         r11, r11, r7, lsr #4    @              ((g & 0x7e00) >> 4) |
    orr         r3, r11, r0, lsr #10    @              (b >> 10)
#ifdef SANSA_FUZEV2
    mov         r7, r3, lsr #8          @
    bic         r3, r3, #0xff00         @
    orr         r3, r7, r3, lsl #8      @ swap pixel
#endif
                                        @ save pixel
                                        @
    sub         r7, r12, #16            @ r7 = Y = (Y' - 16)*149
    add         r12, r7, r7, asl #2     @
    add         r12, r12, r12, asl #4   @
    add         r7, r12, r7, asl #6     @
                                        @ compute R, G, and B
    add         r0, r8, r7              @ r0  = b' = Y + bu
    add         r11, r9, r7, asl #1     @ r11 = r' = Y*2 + rv
    rsb         r7, r10, r7             @ r7  = g' = Y - guv
                                        @
    sub         r12, r0, r0, lsr #5     @ r0 = 31/32*b + b/256
    add         r0, r12, r0, lsr #8     @
                                        @
    sub         r12, r11, r11, lsr #5   @ r11 = 31/32*r + r/256
    add         r11, r12, r11, lsr #8   @
                                        @
    sub         r12, r7, r7, lsr #6     @ r7 = 63/64*g + g/256
    add         r7, r12, r7, lsr #8     @

    @ This kernel element is zero - use r14 as the delta directly
                                        @
    add         r0, r0, r14             @ b = r0 + delta
    add         r11, r11, r14, lsl #1   @ r = r11 + delta*2
    add         r7, r7, r14, lsr #1     @ g = r7 + delta/2
                                        @
    orr         r12, r0, r11, asr #1    @ check if clamping is needed...
    orr         r12, r12, r7            @ ...at all
    movs        r12, r12, asr #15       @
    beq         15f @ no clamp          @
    movs        r12, r0, asr #15        @ clamp b
    mvnne       r0, r12, lsr #15        @
    andne       r0, r0, #0x7c00         @ mask b only if clamped
    movs        r12, r11, asr #16       @ clamp r
    mvnne       r11, r12, lsr #16       @
    movs        r12, r7, asr #15        @ clamp g
    mvnne       r7, r12, lsr #15        @
15: @ no clamp                          @
                                        @
    and         r11, r11, #0xf800       @ pack pixel
    and         r7, r7, #0x7e00         @ r0 = pixel = (r & 0xf800) |
    orr         r11, r11, r7, lsr #4    @              ((g & 0x7e00) >> 4) |
    orr         r0, r11, r0, lsr #10    @              (b >> 10)
#ifdef SANSA_FUZEV2
    mov         r7, r0, lsr #8          @
    bic         r0, r0, #0xff00         @
    orr         r0, r7, r0, lsl #8      @ swap pixel
#endif
    orr         r3, r3, r0, lsl #16     @ pack with 2nd pixel
    mov         r0, #0xC8000000         @
    orr         r0, r0, #0x120000       @ r0 = DBOP_BASE

    str         r3, [r0, #0x10]         @ write pixel
                                        @
    subs        r1, r1, #2              @ subtract block from width
    bgt         10b @ loop line         @
                                        @
1: @ busy
    @ we write at most 110 32-bit words per line (LCD_WIDTH/2); the fifo is
    @ bigger (128*32), so polling for an empty fifo only after each line is safe
    ldr         r7, [r0,#0xc]           @ r7 = DBOP_STATUS
    tst         r7, #DBOP_BUSY          @ fifo not empty?
    beq         1b                      @

    ldmpc       regs=r4-r11             @ restore registers and return
    .ltorg                              @ dump constant pool
    .size   lcd_write_yuv420_lines_odither, .-lcd_write_yuv420_lines_odither