1 /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Author: Keith Packard, SuSE, Inc.
31 #include "pixman-private.h"
32 #include "pixman-combine32.h"
33 #include "pixman-inlines.h"
35 static force_inline
uint32_t
38 if (((uintptr_t)a
) & 1)
40 #ifdef WORDS_BIGENDIAN
41 return (*a
<< 16) | (*(uint16_t *)(a
+ 1));
43 return *a
| (*(uint16_t *)(a
+ 1) << 8);
48 #ifdef WORDS_BIGENDIAN
49 return (*(uint16_t *)a
<< 8) | *(a
+ 2);
51 return *(uint16_t *)a
| (*(a
+ 2) << 16);
56 static force_inline
void
60 if (((uintptr_t)a
) & 1)
62 #ifdef WORDS_BIGENDIAN
63 *a
= (uint8_t) (v
>> 16);
64 *(uint16_t *)(a
+ 1) = (uint16_t) (v
);
67 *(uint16_t *)(a
+ 1) = (uint16_t) (v
>> 8);
72 #ifdef WORDS_BIGENDIAN
73 *(uint16_t *)a
= (uint16_t)(v
>> 8);
74 *(a
+ 2) = (uint8_t)v
;
76 *(uint16_t *)a
= (uint16_t)v
;
77 *(a
+ 2) = (uint8_t)(v
>> 16);
82 static force_inline
uint32_t
86 uint32_t a
= ~src
>> 24;
88 UN8x4_MUL_UN8_ADD_UN8x4 (dest
, a
, src
);
93 static force_inline
uint32_t
110 fast_composite_over_x888_8_8888 (pixman_implementation_t
*imp
,
111 pixman_composite_info_t
*info
)
113 PIXMAN_COMPOSITE_ARGS (info
);
114 uint32_t *src
, *src_line
;
115 uint32_t *dst
, *dst_line
;
116 uint8_t *mask
, *mask_line
;
117 int src_stride
, mask_stride
, dst_stride
;
122 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
123 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
124 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
129 src_line
+= src_stride
;
131 dst_line
+= dst_stride
;
133 mask_line
+= mask_stride
;
141 s
= *src
| 0xff000000;
150 *dst
= over (d
, *dst
);
160 fast_composite_in_n_8_8 (pixman_implementation_t
*imp
,
161 pixman_composite_info_t
*info
)
163 PIXMAN_COMPOSITE_ARGS (info
);
165 uint8_t *dst_line
, *dst
;
166 uint8_t *mask_line
, *mask
, m
;
167 int dst_stride
, mask_stride
;
171 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
175 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 1);
176 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
183 dst_line
+= dst_stride
;
185 mask_line
+= mask_stride
;
195 *dst
= MUL_UN8 (m
, *dst
, t
);
206 dst_line
+= dst_stride
;
208 mask_line
+= mask_stride
;
214 m
= MUL_UN8 (m
, srca
, t
);
219 *dst
= MUL_UN8 (m
, *dst
, t
);
228 fast_composite_in_8_8 (pixman_implementation_t
*imp
,
229 pixman_composite_info_t
*info
)
231 PIXMAN_COMPOSITE_ARGS (info
);
232 uint8_t *dst_line
, *dst
;
233 uint8_t *src_line
, *src
;
234 int dst_stride
, src_stride
;
239 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint8_t, src_stride
, src_line
, 1);
240 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 1);
245 dst_line
+= dst_stride
;
247 src_line
+= src_stride
;
257 *dst
= MUL_UN8 (s
, *dst
, t
);
265 fast_composite_over_n_8_8888 (pixman_implementation_t
*imp
,
266 pixman_composite_info_t
*info
)
268 PIXMAN_COMPOSITE_ARGS (info
);
270 uint32_t *dst_line
, *dst
, d
;
271 uint8_t *mask_line
, *mask
, m
;
272 int dst_stride
, mask_stride
;
275 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
281 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
282 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
287 dst_line
+= dst_stride
;
289 mask_line
+= mask_stride
;
300 *dst
= over (src
, *dst
);
305 *dst
= over (d
, *dst
);
313 fast_composite_add_n_8888_8888_ca (pixman_implementation_t
*imp
,
314 pixman_composite_info_t
*info
)
316 PIXMAN_COMPOSITE_ARGS (info
);
318 uint32_t *dst_line
, *dst
, d
;
319 uint32_t *mask_line
, *mask
, ma
;
320 int dst_stride
, mask_stride
;
323 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
328 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
329 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint32_t, mask_stride
, mask_line
, 1);
334 dst_line
+= dst_stride
;
336 mask_line
+= mask_stride
;
348 UN8x4_MUL_UN8x4_ADD_UN8x4 (s
, ma
, d
);
359 fast_composite_over_n_8888_8888_ca (pixman_implementation_t
*imp
,
360 pixman_composite_info_t
*info
)
362 PIXMAN_COMPOSITE_ARGS (info
);
363 uint32_t src
, srca
, s
;
364 uint32_t *dst_line
, *dst
, d
;
365 uint32_t *mask_line
, *mask
, ma
;
366 int dst_stride
, mask_stride
;
369 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
375 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
376 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint32_t, mask_stride
, mask_line
, 1);
381 dst_line
+= dst_stride
;
383 mask_line
+= mask_stride
;
389 if (ma
== 0xffffffff)
394 *dst
= over (src
, *dst
);
401 UN8x4_MUL_UN8x4 (s
, ma
);
402 UN8x4_MUL_UN8 (ma
, srca
);
404 UN8x4_MUL_UN8x4_ADD_UN8x4 (d
, ma
, s
);
415 fast_composite_over_n_8_0888 (pixman_implementation_t
*imp
,
416 pixman_composite_info_t
*info
)
418 PIXMAN_COMPOSITE_ARGS (info
);
420 uint8_t *dst_line
, *dst
;
422 uint8_t *mask_line
, *mask
, m
;
423 int dst_stride
, mask_stride
;
426 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
432 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 3);
433 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
438 dst_line
+= dst_stride
;
440 mask_line
+= mask_stride
;
461 d
= over (in (src
, m
), fetch_24 (dst
));
470 fast_composite_over_n_8_0565 (pixman_implementation_t
*imp
,
471 pixman_composite_info_t
*info
)
473 PIXMAN_COMPOSITE_ARGS (info
);
475 uint16_t *dst_line
, *dst
;
477 uint8_t *mask_line
, *mask
, m
;
478 int dst_stride
, mask_stride
;
481 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
487 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint16_t, dst_stride
, dst_line
, 1);
488 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
493 dst_line
+= dst_stride
;
495 mask_line
+= mask_stride
;
510 d
= over (src
, convert_0565_to_0888 (d
));
512 *dst
= convert_8888_to_0565 (d
);
517 d
= over (in (src
, m
), convert_0565_to_0888 (d
));
518 *dst
= convert_8888_to_0565 (d
);
526 fast_composite_over_n_8888_0565_ca (pixman_implementation_t
*imp
,
527 pixman_composite_info_t
*info
)
529 PIXMAN_COMPOSITE_ARGS (info
);
530 uint32_t src
, srca
, s
;
532 uint16_t *dst_line
, *dst
;
534 uint32_t *mask_line
, *mask
, ma
;
535 int dst_stride
, mask_stride
;
538 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
544 src16
= convert_8888_to_0565 (src
);
546 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint16_t, dst_stride
, dst_line
, 1);
547 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint32_t, mask_stride
, mask_line
, 1);
552 dst_line
+= dst_stride
;
554 mask_line
+= mask_stride
;
560 if (ma
== 0xffffffff)
569 d
= over (src
, convert_0565_to_0888 (d
));
570 *dst
= convert_8888_to_0565 (d
);
576 d
= convert_0565_to_0888 (d
);
580 UN8x4_MUL_UN8x4 (s
, ma
);
581 UN8x4_MUL_UN8 (ma
, srca
);
583 UN8x4_MUL_UN8x4_ADD_UN8x4 (d
, ma
, s
);
585 *dst
= convert_8888_to_0565 (d
);
593 fast_composite_over_8888_8888 (pixman_implementation_t
*imp
,
594 pixman_composite_info_t
*info
)
596 PIXMAN_COMPOSITE_ARGS (info
);
597 uint32_t *dst_line
, *dst
;
598 uint32_t *src_line
, *src
, s
;
599 int dst_stride
, src_stride
;
603 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
604 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
609 dst_line
+= dst_stride
;
611 src_line
+= src_stride
;
621 *dst
= over (s
, *dst
);
628 fast_composite_src_x888_8888 (pixman_implementation_t
*imp
,
629 pixman_composite_info_t
*info
)
631 PIXMAN_COMPOSITE_ARGS (info
);
632 uint32_t *dst_line
, *dst
;
633 uint32_t *src_line
, *src
;
634 int dst_stride
, src_stride
;
637 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
638 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
643 dst_line
+= dst_stride
;
645 src_line
+= src_stride
;
649 *dst
++ = (*src
++) | 0xff000000;
655 fast_composite_over_8888_0888 (pixman_implementation_t
*imp
,
656 pixman_composite_info_t
*info
)
658 PIXMAN_COMPOSITE_ARGS (info
);
659 uint8_t *dst_line
, *dst
;
661 uint32_t *src_line
, *src
, s
;
663 int dst_stride
, src_stride
;
666 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 3);
667 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
672 dst_line
+= dst_stride
;
674 src_line
+= src_stride
;
686 d
= over (s
, fetch_24 (dst
));
697 fast_composite_over_8888_0565 (pixman_implementation_t
*imp
,
698 pixman_composite_info_t
*info
)
700 PIXMAN_COMPOSITE_ARGS (info
);
701 uint16_t *dst_line
, *dst
;
703 uint32_t *src_line
, *src
, s
;
705 int dst_stride
, src_stride
;
708 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
709 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint16_t, dst_stride
, dst_line
, 1);
714 dst_line
+= dst_stride
;
716 src_line
+= src_stride
;
732 d
= over (s
, convert_0565_to_0888 (d
));
734 *dst
= convert_8888_to_0565 (d
);
742 fast_composite_add_8_8 (pixman_implementation_t
*imp
,
743 pixman_composite_info_t
*info
)
745 PIXMAN_COMPOSITE_ARGS (info
);
746 uint8_t *dst_line
, *dst
;
747 uint8_t *src_line
, *src
;
748 int dst_stride
, src_stride
;
753 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint8_t, src_stride
, src_line
, 1);
754 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 1);
759 dst_line
+= dst_stride
;
761 src_line
+= src_stride
;
773 s
= t
| (0 - (t
>> 8));
783 fast_composite_add_0565_0565 (pixman_implementation_t
*imp
,
784 pixman_composite_info_t
*info
)
786 PIXMAN_COMPOSITE_ARGS (info
);
787 uint16_t *dst_line
, *dst
;
789 uint16_t *src_line
, *src
;
791 int dst_stride
, src_stride
;
794 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint16_t, src_stride
, src_line
, 1);
795 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint16_t, dst_stride
, dst_line
, 1);
800 dst_line
+= dst_stride
;
802 src_line
+= src_stride
;
811 s
= convert_0565_to_8888 (s
);
814 d
= convert_0565_to_8888 (d
);
815 UN8x4_ADD_UN8x4 (s
, d
);
817 *dst
= convert_8888_to_0565 (s
);
825 fast_composite_add_8888_8888 (pixman_implementation_t
*imp
,
826 pixman_composite_info_t
*info
)
828 PIXMAN_COMPOSITE_ARGS (info
);
829 uint32_t *dst_line
, *dst
;
830 uint32_t *src_line
, *src
;
831 int dst_stride
, src_stride
;
835 PIXMAN_IMAGE_GET_LINE (src_image
, src_x
, src_y
, uint32_t, src_stride
, src_line
, 1);
836 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
841 dst_line
+= dst_stride
;
843 src_line
+= src_stride
;
855 UN8x4_ADD_UN8x4 (s
, d
);
865 fast_composite_add_n_8_8 (pixman_implementation_t
*imp
,
866 pixman_composite_info_t
*info
)
868 PIXMAN_COMPOSITE_ARGS (info
);
869 uint8_t *dst_line
, *dst
;
870 uint8_t *mask_line
, *mask
;
871 int dst_stride
, mask_stride
;
876 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint8_t, dst_stride
, dst_line
, 1);
877 PIXMAN_IMAGE_GET_LINE (mask_image
, mask_x
, mask_y
, uint8_t, mask_stride
, mask_line
, 1);
878 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
884 dst_line
+= dst_stride
;
886 mask_line
+= mask_stride
;
899 m
= MUL_UN8 (sa
, a
, tmp
);
900 r
= ADD_UN8 (m
, d
, tmp
);
/* Helpers for addressing individual bits inside an a1 bitmap stored as an
 * array of uint32_t words.  Bit order within a word depends on endianness.
 */
#ifdef WORDS_BIGENDIAN
#define CREATE_BITMASK(n) (0x80000000 >> (n))
#define UPDATE_BITMASK(n) ((n) >> 1)
#else
/* Use an unsigned literal: (1 << 31) on a signed int is undefined
 * behavior in C (left shift into the sign bit).
 */
#define CREATE_BITMASK(n) (1U << (n))
#define UPDATE_BITMASK(n) ((n) << 1)
#endif

/* Test/set bit 'n' of the bit array starting at word pointer 'p'. */
#define TEST_BIT(p, n)					\
    (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
/* No trailing semicolon after while (0): the caller supplies it, which
 * keeps SET_BIT usable in if/else bodies.
 */
#define SET_BIT(p, n)							\
    do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0)
921 fast_composite_add_1_1 (pixman_implementation_t
*imp
,
922 pixman_composite_info_t
*info
)
924 PIXMAN_COMPOSITE_ARGS (info
);
925 uint32_t *dst_line
, *dst
;
926 uint32_t *src_line
, *src
;
927 int dst_stride
, src_stride
;
930 PIXMAN_IMAGE_GET_LINE (src_image
, 0, src_y
, uint32_t,
931 src_stride
, src_line
, 1);
932 PIXMAN_IMAGE_GET_LINE (dest_image
, 0, dest_y
, uint32_t,
933 dst_stride
, dst_line
, 1);
938 dst_line
+= dst_stride
;
940 src_line
+= src_stride
;
946 * TODO: improve performance by processing uint32_t data instead
949 if (TEST_BIT (src
, src_x
+ w
))
950 SET_BIT (dst
, dest_x
+ w
);
956 fast_composite_over_n_1_8888 (pixman_implementation_t
*imp
,
957 pixman_composite_info_t
*info
)
959 PIXMAN_COMPOSITE_ARGS (info
);
961 uint32_t *dst
, *dst_line
;
962 uint32_t *mask
, *mask_line
;
963 int mask_stride
, dst_stride
;
964 uint32_t bitcache
, bitmask
;
970 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
975 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t,
976 dst_stride
, dst_line
, 1);
977 PIXMAN_IMAGE_GET_LINE (mask_image
, 0, mask_y
, uint32_t,
978 mask_stride
, mask_line
, 1);
979 mask_line
+= mask_x
>> 5;
986 dst_line
+= dst_stride
;
988 mask_line
+= mask_stride
;
992 bitmask
= CREATE_BITMASK (mask_x
& 31);
999 bitmask
= CREATE_BITMASK (0);
1001 if (bitcache
& bitmask
)
1003 bitmask
= UPDATE_BITMASK (bitmask
);
1013 dst_line
+= dst_stride
;
1015 mask_line
+= mask_stride
;
1019 bitmask
= CREATE_BITMASK (mask_x
& 31);
1026 bitmask
= CREATE_BITMASK (0);
1028 if (bitcache
& bitmask
)
1029 *dst
= over (src
, *dst
);
1030 bitmask
= UPDATE_BITMASK (bitmask
);
1038 fast_composite_over_n_1_0565 (pixman_implementation_t
*imp
,
1039 pixman_composite_info_t
*info
)
1041 PIXMAN_COMPOSITE_ARGS (info
);
1043 uint16_t *dst
, *dst_line
;
1044 uint32_t *mask
, *mask_line
;
1045 int mask_stride
, dst_stride
;
1046 uint32_t bitcache
, bitmask
;
1054 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
1059 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint16_t,
1060 dst_stride
, dst_line
, 1);
1061 PIXMAN_IMAGE_GET_LINE (mask_image
, 0, mask_y
, uint32_t,
1062 mask_stride
, mask_line
, 1);
1063 mask_line
+= mask_x
>> 5;
1067 src565
= convert_8888_to_0565 (src
);
1071 dst_line
+= dst_stride
;
1073 mask_line
+= mask_stride
;
1077 bitmask
= CREATE_BITMASK (mask_x
& 31);
1084 bitmask
= CREATE_BITMASK (0);
1086 if (bitcache
& bitmask
)
1088 bitmask
= UPDATE_BITMASK (bitmask
);
1098 dst_line
+= dst_stride
;
1100 mask_line
+= mask_stride
;
1104 bitmask
= CREATE_BITMASK (mask_x
& 31);
1111 bitmask
= CREATE_BITMASK (0);
1113 if (bitcache
& bitmask
)
1115 d
= over (src
, convert_0565_to_0888 (*dst
));
1116 *dst
= convert_8888_to_0565 (d
);
1118 bitmask
= UPDATE_BITMASK (bitmask
);
1130 fast_composite_solid_fill (pixman_implementation_t
*imp
,
1131 pixman_composite_info_t
*info
)
1133 PIXMAN_COMPOSITE_ARGS (info
);
1136 src
= _pixman_image_get_solid (imp
, src_image
, dest_image
->bits
.format
);
1138 if (dest_image
->bits
.format
== PIXMAN_a1
)
1142 else if (dest_image
->bits
.format
== PIXMAN_a8
)
1146 else if (dest_image
->bits
.format
== PIXMAN_r5g6b5
||
1147 dest_image
->bits
.format
== PIXMAN_b5g6r5
)
1149 src
= convert_8888_to_0565 (src
);
1152 pixman_fill (dest_image
->bits
.bits
, dest_image
->bits
.rowstride
,
1153 PIXMAN_FORMAT_BPP (dest_image
->bits
.format
),
1160 fast_composite_src_memcpy (pixman_implementation_t
*imp
,
1161 pixman_composite_info_t
*info
)
1163 PIXMAN_COMPOSITE_ARGS (info
);
1164 int bpp
= PIXMAN_FORMAT_BPP (dest_image
->bits
.format
) / 8;
1165 uint32_t n_bytes
= width
* bpp
;
1166 int dst_stride
, src_stride
;
1170 src_stride
= src_image
->bits
.rowstride
* 4;
1171 dst_stride
= dest_image
->bits
.rowstride
* 4;
1173 src
= (uint8_t *)src_image
->bits
.bits
+ src_y
* src_stride
+ src_x
* bpp
;
1174 dst
= (uint8_t *)dest_image
->bits
.bits
+ dest_y
* dst_stride
+ dest_x
* bpp
;
1178 memcpy (dst
, src
, n_bytes
);
1185 FAST_NEAREST (8888_8888_cover
, 8888, 8888, uint32_t, uint32_t, SRC
, COVER
)
1186 FAST_NEAREST (8888_8888_none
, 8888, 8888, uint32_t, uint32_t, SRC
, NONE
)
1187 FAST_NEAREST (8888_8888_pad
, 8888, 8888, uint32_t, uint32_t, SRC
, PAD
)
1188 FAST_NEAREST (8888_8888_normal
, 8888, 8888, uint32_t, uint32_t, SRC
, NORMAL
)
1189 FAST_NEAREST (x888_8888_cover
, x888
, 8888, uint32_t, uint32_t, SRC
, COVER
)
1190 FAST_NEAREST (x888_8888_pad
, x888
, 8888, uint32_t, uint32_t, SRC
, PAD
)
1191 FAST_NEAREST (x888_8888_normal
, x888
, 8888, uint32_t, uint32_t, SRC
, NORMAL
)
1192 FAST_NEAREST (8888_8888_cover
, 8888, 8888, uint32_t, uint32_t, OVER
, COVER
)
1193 FAST_NEAREST (8888_8888_none
, 8888, 8888, uint32_t, uint32_t, OVER
, NONE
)
1194 FAST_NEAREST (8888_8888_pad
, 8888, 8888, uint32_t, uint32_t, OVER
, PAD
)
1195 FAST_NEAREST (8888_8888_normal
, 8888, 8888, uint32_t, uint32_t, OVER
, NORMAL
)
1196 FAST_NEAREST (8888_565_cover
, 8888, 0565, uint32_t, uint16_t, SRC
, COVER
)
1197 FAST_NEAREST (8888_565_none
, 8888, 0565, uint32_t, uint16_t, SRC
, NONE
)
1198 FAST_NEAREST (8888_565_pad
, 8888, 0565, uint32_t, uint16_t, SRC
, PAD
)
1199 FAST_NEAREST (8888_565_normal
, 8888, 0565, uint32_t, uint16_t, SRC
, NORMAL
)
1200 FAST_NEAREST (565_565_normal
, 0565, 0565, uint16_t, uint16_t, SRC
, NORMAL
)
1201 FAST_NEAREST (8888_565_cover
, 8888, 0565, uint32_t, uint16_t, OVER
, COVER
)
1202 FAST_NEAREST (8888_565_none
, 8888, 0565, uint32_t, uint16_t, OVER
, NONE
)
1203 FAST_NEAREST (8888_565_pad
, 8888, 0565, uint32_t, uint16_t, OVER
, PAD
)
1204 FAST_NEAREST (8888_565_normal
, 8888, 0565, uint32_t, uint16_t, OVER
, NORMAL
)
1206 #define REPEAT_MIN_WIDTH 32
1209 fast_composite_tiled_repeat (pixman_implementation_t
*imp
,
1210 pixman_composite_info_t
*info
)
1212 PIXMAN_COMPOSITE_ARGS (info
);
1213 pixman_composite_func_t func
;
1214 pixman_format_code_t mask_format
;
1215 uint32_t src_flags
, mask_flags
;
1217 int32_t width_remain
;
1221 pixman_image_t extended_src_image
;
1222 uint32_t extended_src
[REPEAT_MIN_WIDTH
* 2];
1223 pixman_bool_t need_src_extension
;
1227 pixman_composite_info_t info2
= *info
;
1229 src_flags
= (info
->src_flags
& ~FAST_PATH_NORMAL_REPEAT
) |
1230 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST
;
1234 mask_format
= mask_image
->common
.extended_format_code
;
1235 mask_flags
= info
->mask_flags
;
1239 mask_format
= PIXMAN_null
;
1240 mask_flags
= FAST_PATH_IS_OPAQUE
;
1243 _pixman_implementation_lookup_composite (
1244 imp
->toplevel
, info
->op
,
1245 src_image
->common
.extended_format_code
, src_flags
,
1246 mask_format
, mask_flags
,
1247 dest_image
->common
.extended_format_code
, info
->dest_flags
,
1250 src_bpp
= PIXMAN_FORMAT_BPP (src_image
->bits
.format
);
1252 if (src_image
->bits
.width
< REPEAT_MIN_WIDTH
&&
1253 (src_bpp
== 32 || src_bpp
== 16 || src_bpp
== 8) &&
1254 !src_image
->bits
.indexed
)
1257 sx
= MOD (sx
, src_image
->bits
.width
);
1261 while (src_width
< REPEAT_MIN_WIDTH
&& src_width
<= sx
)
1262 src_width
+= src_image
->bits
.width
;
1264 src_stride
= (src_width
* (src_bpp
>> 3) + 3) / (int) sizeof (uint32_t);
1266 /* Initialize/validate stack-allocated temporary image */
1267 _pixman_bits_image_init (&extended_src_image
, src_image
->bits
.format
,
1268 src_width
, 1, &extended_src
[0], src_stride
,
1270 _pixman_image_validate (&extended_src_image
);
1272 info2
.src_image
= &extended_src_image
;
1273 need_src_extension
= TRUE
;
1277 src_width
= src_image
->bits
.width
;
1278 need_src_extension
= FALSE
;
1284 while (--height
>= 0)
1286 sx
= MOD (sx
, src_width
);
1287 sy
= MOD (sy
, src_image
->bits
.height
);
1289 if (need_src_extension
)
1293 PIXMAN_IMAGE_GET_LINE (src_image
, 0, sy
, uint32_t, src_stride
, src_line
, 1);
1295 for (i
= 0; i
< src_width
; )
1297 for (j
= 0; j
< src_image
->bits
.width
; j
++, i
++)
1298 extended_src
[i
] = src_line
[j
];
1301 else if (src_bpp
== 16)
1303 uint16_t *src_line_16
;
1305 PIXMAN_IMAGE_GET_LINE (src_image
, 0, sy
, uint16_t, src_stride
,
1307 src_line
= (uint32_t*)src_line_16
;
1309 for (i
= 0; i
< src_width
; )
1311 for (j
= 0; j
< src_image
->bits
.width
; j
++, i
++)
1312 ((uint16_t*)extended_src
)[i
] = ((uint16_t*)src_line
)[j
];
1315 else if (src_bpp
== 8)
1317 uint8_t *src_line_8
;
1319 PIXMAN_IMAGE_GET_LINE (src_image
, 0, sy
, uint8_t, src_stride
,
1321 src_line
= (uint32_t*)src_line_8
;
1323 for (i
= 0; i
< src_width
; )
1325 for (j
= 0; j
< src_image
->bits
.width
; j
++, i
++)
1326 ((uint8_t*)extended_src
)[i
] = ((uint8_t*)src_line
)[j
];
1337 width_remain
= width
;
1339 while (width_remain
> 0)
1341 num_pixels
= src_width
- sx
;
1343 if (num_pixels
> width_remain
)
1344 num_pixels
= width_remain
;
1347 info2
.width
= num_pixels
;
1352 width_remain
-= num_pixels
;
1353 info2
.mask_x
+= num_pixels
;
1354 info2
.dest_x
+= num_pixels
;
1360 info2
.mask_x
= info
->mask_x
;
1362 info2
.dest_x
= info
->dest_x
;
1366 if (need_src_extension
)
1367 _pixman_image_fini (&extended_src_image
);
1370 /* Use more unrolling for src_0565_0565 because it is typically CPU bound */
1371 static force_inline
void
1372 scaled_nearest_scanline_565_565_SRC (uint16_t * dst
,
1373 const uint16_t * src
,
1376 pixman_fixed_t unit_x
,
1377 pixman_fixed_t max_vx
,
1378 pixman_bool_t fully_transparent_src
)
1380 uint16_t tmp1
, tmp2
, tmp3
, tmp4
;
1381 while ((w
-= 4) >= 0)
1383 tmp1
= *(src
+ pixman_fixed_to_int (vx
));
1385 tmp2
= *(src
+ pixman_fixed_to_int (vx
));
1387 tmp3
= *(src
+ pixman_fixed_to_int (vx
));
1389 tmp4
= *(src
+ pixman_fixed_to_int (vx
));
1398 tmp1
= *(src
+ pixman_fixed_to_int (vx
));
1400 tmp2
= *(src
+ pixman_fixed_to_int (vx
));
1406 *dst
= *(src
+ pixman_fixed_to_int (vx
));
1409 FAST_NEAREST_MAINLOOP (565_565_cover_SRC
,
1410 scaled_nearest_scanline_565_565_SRC
,
1411 uint16_t, uint16_t, COVER
)
1412 FAST_NEAREST_MAINLOOP (565_565_none_SRC
,
1413 scaled_nearest_scanline_565_565_SRC
,
1414 uint16_t, uint16_t, NONE
)
1415 FAST_NEAREST_MAINLOOP (565_565_pad_SRC
,
1416 scaled_nearest_scanline_565_565_SRC
,
1417 uint16_t, uint16_t, PAD
)
1419 static force_inline
uint32_t
1420 fetch_nearest (pixman_repeat_t src_repeat
,
1421 pixman_format_code_t format
,
1422 uint32_t *src
, int x
, int src_width
)
1424 if (repeat (src_repeat
, &x
, src_width
))
1426 if (format
== PIXMAN_x8r8g8b8
|| format
== PIXMAN_x8b8g8r8
)
1427 return *(src
+ x
) | 0xff000000;
1437 static force_inline
void
1438 combine_over (uint32_t s
, uint32_t *dst
)
1442 uint8_t ia
= 0xff - (s
>> 24);
1445 UN8x4_MUL_UN8_ADD_UN8x4 (*dst
, ia
, s
);
1451 static force_inline
void
1452 combine_src (uint32_t s
, uint32_t *dst
)
1458 fast_composite_scaled_nearest (pixman_implementation_t
*imp
,
1459 pixman_composite_info_t
*info
)
1461 PIXMAN_COMPOSITE_ARGS (info
);
1464 int dst_stride
, src_stride
;
1465 int src_width
, src_height
;
1466 pixman_repeat_t src_repeat
;
1467 pixman_fixed_t unit_x
, unit_y
;
1468 pixman_format_code_t src_format
;
1472 PIXMAN_IMAGE_GET_LINE (dest_image
, dest_x
, dest_y
, uint32_t, dst_stride
, dst_line
, 1);
1473 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1474 * transformed from destination space to source space
1476 PIXMAN_IMAGE_GET_LINE (src_image
, 0, 0, uint32_t, src_stride
, src_line
, 1);
1478 /* reference point is the center of the pixel */
1479 v
.vector
[0] = pixman_int_to_fixed (src_x
) + pixman_fixed_1
/ 2;
1480 v
.vector
[1] = pixman_int_to_fixed (src_y
) + pixman_fixed_1
/ 2;
1481 v
.vector
[2] = pixman_fixed_1
;
1483 if (!pixman_transform_point_3d (src_image
->common
.transform
, &v
))
1486 unit_x
= src_image
->common
.transform
->matrix
[0][0];
1487 unit_y
= src_image
->common
.transform
->matrix
[1][1];
1489 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1490 v
.vector
[0] -= pixman_fixed_e
;
1491 v
.vector
[1] -= pixman_fixed_e
;
1493 src_height
= src_image
->bits
.height
;
1494 src_width
= src_image
->bits
.width
;
1495 src_repeat
= src_image
->common
.repeat
;
1496 src_format
= src_image
->bits
.format
;
1501 pixman_fixed_t vx
= v
.vector
[0];
1502 int y
= pixman_fixed_to_int (vy
);
1503 uint32_t *dst
= dst_line
;
1505 dst_line
+= dst_stride
;
1507 /* adjust the y location by a unit vector in the y direction
1508 * this is equivalent to transforming y+1 of the destination point to source space */
1511 if (!repeat (src_repeat
, &y
, src_height
))
1513 if (op
== PIXMAN_OP_SRC
)
1514 memset (dst
, 0, sizeof (*dst
) * width
);
1520 uint32_t *src
= src_line
+ y
* src_stride
;
1527 x1
= pixman_fixed_to_int (vx
);
1530 x2
= pixman_fixed_to_int (vx
);
1535 s1
= fetch_nearest (src_repeat
, src_format
, src
, x1
, src_width
);
1536 s2
= fetch_nearest (src_repeat
, src_format
, src
, x2
, src_width
);
1538 if (op
== PIXMAN_OP_OVER
)
1540 combine_over (s1
, dst
++);
1541 combine_over (s2
, dst
++);
1545 combine_src (s1
, dst
++);
1546 combine_src (s2
, dst
++);
1555 x
= pixman_fixed_to_int (vx
);
1558 s
= fetch_nearest (src_repeat
, src_format
, src
, x
, src_width
);
1560 if (op
== PIXMAN_OP_OVER
)
1561 combine_over (s
, dst
++);
1563 combine_src (s
, dst
++);
1569 #define CACHE_LINE_SIZE 64
1571 #define FAST_SIMPLE_ROTATE(suffix, pix_type) \
1574 blt_rotated_90_trivial_##suffix (pix_type *dst, \
1576 const pix_type *src, \
1582 for (y = 0; y < h; y++) \
1584 const pix_type *s = src + (h - y - 1); \
1585 pix_type *d = dst + dst_stride * y; \
1586 for (x = 0; x < w; x++) \
1595 blt_rotated_270_trivial_##suffix (pix_type *dst, \
1597 const pix_type *src, \
1603 for (y = 0; y < h; y++) \
1605 const pix_type *s = src + src_stride * (w - 1) + y; \
1606 pix_type *d = dst + dst_stride * y; \
1607 for (x = 0; x < w; x++) \
1616 blt_rotated_90_##suffix (pix_type *dst, \
1618 const pix_type *src, \
1624 int leading_pixels = 0, trailing_pixels = 0; \
1625 const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \
1628 * split processing into handling destination as TILE_SIZExH cache line \
1629 * aligned vertical stripes (optimistically assuming that destination \
1630 * stride is a multiple of cache line, if not - it will be just a bit \
1634 if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \
1636 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1637 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1638 if (leading_pixels > W) \
1639 leading_pixels = W; \
1641 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1642 blt_rotated_90_trivial_##suffix ( \
1650 dst += leading_pixels; \
1651 src += leading_pixels * src_stride; \
1652 W -= leading_pixels; \
1655 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \
1657 trailing_pixels = (((uintptr_t)(dst + W) & \
1658 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1659 if (trailing_pixels > W) \
1660 trailing_pixels = W; \
1661 W -= trailing_pixels; \
1664 for (x = 0; x < W; x += TILE_SIZE) \
1666 /* aligned middle part TILE_SIZExH */ \
1667 blt_rotated_90_trivial_##suffix ( \
1670 src + src_stride * x, \
1676 if (trailing_pixels) \
1678 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1679 blt_rotated_90_trivial_##suffix ( \
1682 src + W * src_stride, \
1690 blt_rotated_270_##suffix (pix_type *dst, \
1692 const pix_type *src, \
1698 int leading_pixels = 0, trailing_pixels = 0; \
1699 const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \
1702 * split processing into handling destination as TILE_SIZExH cache line \
1703 * aligned vertical stripes (optimistically assuming that destination \
1704 * stride is a multiple of cache line, if not - it will be just a bit \
1708 if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \
1710 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1711 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1712 if (leading_pixels > W) \
1713 leading_pixels = W; \
1715 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1716 blt_rotated_270_trivial_##suffix ( \
1719 src + src_stride * (W - leading_pixels), \
1724 dst += leading_pixels; \
1725 W -= leading_pixels; \
1728 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \
1730 trailing_pixels = (((uintptr_t)(dst + W) & \
1731 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1732 if (trailing_pixels > W) \
1733 trailing_pixels = W; \
1734 W -= trailing_pixels; \
1735 src += trailing_pixels * src_stride; \
1738 for (x = 0; x < W; x += TILE_SIZE) \
1740 /* aligned middle part TILE_SIZExH */ \
1741 blt_rotated_270_trivial_##suffix ( \
1744 src + src_stride * (W - x - TILE_SIZE), \
1750 if (trailing_pixels) \
1752 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1753 blt_rotated_270_trivial_##suffix ( \
1756 src - trailing_pixels * src_stride, \
1764 fast_composite_rotate_90_##suffix (pixman_implementation_t *imp, \
1765 pixman_composite_info_t *info) \
1767 PIXMAN_COMPOSITE_ARGS (info); \
1768 pix_type *dst_line; \
1769 pix_type *src_line; \
1770 int dst_stride, src_stride; \
1771 int src_x_t, src_y_t; \
1773 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \
1774 dst_stride, dst_line, 1); \
1775 src_x_t = -src_y + pixman_fixed_to_int ( \
1776 src_image->common.transform->matrix[0][2] + \
1777 pixman_fixed_1 / 2 - pixman_fixed_e) - height;\
1778 src_y_t = src_x + pixman_fixed_to_int ( \
1779 src_image->common.transform->matrix[1][2] + \
1780 pixman_fixed_1 / 2 - pixman_fixed_e); \
1781 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \
1782 src_stride, src_line, 1); \
1783 blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride, \
1788 fast_composite_rotate_270_##suffix (pixman_implementation_t *imp, \
1789 pixman_composite_info_t *info) \
1791 PIXMAN_COMPOSITE_ARGS (info); \
1792 pix_type *dst_line; \
1793 pix_type *src_line; \
1794 int dst_stride, src_stride; \
1795 int src_x_t, src_y_t; \
1797 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \
1798 dst_stride, dst_line, 1); \
1799 src_x_t = src_y + pixman_fixed_to_int ( \
1800 src_image->common.transform->matrix[0][2] + \
1801 pixman_fixed_1 / 2 - pixman_fixed_e); \
1802 src_y_t = -src_x + pixman_fixed_to_int ( \
1803 src_image->common.transform->matrix[1][2] + \
1804 pixman_fixed_1 / 2 - pixman_fixed_e) - width; \
1805 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \
1806 src_stride, src_line, 1); \
1807 blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride, \
1811 FAST_SIMPLE_ROTATE (8, uint8_t)
1812 FAST_SIMPLE_ROTATE (565, uint16_t)
1813 FAST_SIMPLE_ROTATE (8888, uint32_t)
1815 static const pixman_fast_path_t c_fast_paths
[] =
1817 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, r5g6b5
, fast_composite_over_n_8_0565
),
1818 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, b5g6r5
, fast_composite_over_n_8_0565
),
1819 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, r8g8b8
, fast_composite_over_n_8_0888
),
1820 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, b8g8r8
, fast_composite_over_n_8_0888
),
1821 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, a8r8g8b8
, fast_composite_over_n_8_8888
),
1822 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, x8r8g8b8
, fast_composite_over_n_8_8888
),
1823 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, a8b8g8r8
, fast_composite_over_n_8_8888
),
1824 PIXMAN_STD_FAST_PATH (OVER
, solid
, a8
, x8b8g8r8
, fast_composite_over_n_8_8888
),
1825 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, a8r8g8b8
, fast_composite_over_n_1_8888
),
1826 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, x8r8g8b8
, fast_composite_over_n_1_8888
),
1827 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, a8b8g8r8
, fast_composite_over_n_1_8888
),
1828 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, x8b8g8r8
, fast_composite_over_n_1_8888
),
1829 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, r5g6b5
, fast_composite_over_n_1_0565
),
1830 PIXMAN_STD_FAST_PATH (OVER
, solid
, a1
, b5g6r5
, fast_composite_over_n_1_0565
),
1831 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8r8g8b8
, a8r8g8b8
, fast_composite_over_n_8888_8888_ca
),
1832 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8r8g8b8
, x8r8g8b8
, fast_composite_over_n_8888_8888_ca
),
1833 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8r8g8b8
, r5g6b5
, fast_composite_over_n_8888_0565_ca
),
1834 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8b8g8r8
, a8b8g8r8
, fast_composite_over_n_8888_8888_ca
),
1835 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8b8g8r8
, x8b8g8r8
, fast_composite_over_n_8888_8888_ca
),
1836 PIXMAN_STD_FAST_PATH_CA (OVER
, solid
, a8b8g8r8
, b5g6r5
, fast_composite_over_n_8888_0565_ca
),
1837 PIXMAN_STD_FAST_PATH (OVER
, x8r8g8b8
, a8
, x8r8g8b8
, fast_composite_over_x888_8_8888
),
1838 PIXMAN_STD_FAST_PATH (OVER
, x8r8g8b8
, a8
, a8r8g8b8
, fast_composite_over_x888_8_8888
),
1839 PIXMAN_STD_FAST_PATH (OVER
, x8b8g8r8
, a8
, x8b8g8r8
, fast_composite_over_x888_8_8888
),
1840 PIXMAN_STD_FAST_PATH (OVER
, x8b8g8r8
, a8
, a8b8g8r8
, fast_composite_over_x888_8_8888
),
1841 PIXMAN_STD_FAST_PATH (OVER
, a8r8g8b8
, null
, a8r8g8b8
, fast_composite_over_8888_8888
),
1842 PIXMAN_STD_FAST_PATH (OVER
, a8r8g8b8
, null
, x8r8g8b8
, fast_composite_over_8888_8888
),
1843 PIXMAN_STD_FAST_PATH (OVER
, a8r8g8b8
, null
, r5g6b5
, fast_composite_over_8888_0565
),
1844 PIXMAN_STD_FAST_PATH (OVER
, a8b8g8r8
, null
, a8b8g8r8
, fast_composite_over_8888_8888
),
1845 PIXMAN_STD_FAST_PATH (OVER
, a8b8g8r8
, null
, x8b8g8r8
, fast_composite_over_8888_8888
),
1846 PIXMAN_STD_FAST_PATH (OVER
, a8b8g8r8
, null
, b5g6r5
, fast_composite_over_8888_0565
),
1847 PIXMAN_STD_FAST_PATH (ADD
, r5g6b5
, null
, r5g6b5
, fast_composite_add_0565_0565
),
1848 PIXMAN_STD_FAST_PATH (ADD
, b5g6r5
, null
, b5g6r5
, fast_composite_add_0565_0565
),
1849 PIXMAN_STD_FAST_PATH (ADD
, a8r8g8b8
, null
, a8r8g8b8
, fast_composite_add_8888_8888
),
1850 PIXMAN_STD_FAST_PATH (ADD
, a8b8g8r8
, null
, a8b8g8r8
, fast_composite_add_8888_8888
),
1851 PIXMAN_STD_FAST_PATH (ADD
, a8
, null
, a8
, fast_composite_add_8_8
),
1852 PIXMAN_STD_FAST_PATH (ADD
, a1
, null
, a1
, fast_composite_add_1_1
),
1853 PIXMAN_STD_FAST_PATH_CA (ADD
, solid
, a8r8g8b8
, a8r8g8b8
, fast_composite_add_n_8888_8888_ca
),
1854 PIXMAN_STD_FAST_PATH (ADD
, solid
, a8
, a8
, fast_composite_add_n_8_8
),
1855 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, a8r8g8b8
, fast_composite_solid_fill
),
1856 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, x8r8g8b8
, fast_composite_solid_fill
),
1857 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, a8b8g8r8
, fast_composite_solid_fill
),
1858 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, x8b8g8r8
, fast_composite_solid_fill
),
1859 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, a1
, fast_composite_solid_fill
),
1860 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, a8
, fast_composite_solid_fill
),
1861 PIXMAN_STD_FAST_PATH (SRC
, solid
, null
, r5g6b5
, fast_composite_solid_fill
),
1862 PIXMAN_STD_FAST_PATH (SRC
, x8r8g8b8
, null
, a8r8g8b8
, fast_composite_src_x888_8888
),
1863 PIXMAN_STD_FAST_PATH (SRC
, x8b8g8r8
, null
, a8b8g8r8
, fast_composite_src_x888_8888
),
1864 PIXMAN_STD_FAST_PATH (SRC
, a8r8g8b8
, null
, x8r8g8b8
, fast_composite_src_memcpy
),
1865 PIXMAN_STD_FAST_PATH (SRC
, a8r8g8b8
, null
, a8r8g8b8
, fast_composite_src_memcpy
),
1866 PIXMAN_STD_FAST_PATH (SRC
, x8r8g8b8
, null
, x8r8g8b8
, fast_composite_src_memcpy
),
1867 PIXMAN_STD_FAST_PATH (SRC
, a8b8g8r8
, null
, x8b8g8r8
, fast_composite_src_memcpy
),
1868 PIXMAN_STD_FAST_PATH (SRC
, a8b8g8r8
, null
, a8b8g8r8
, fast_composite_src_memcpy
),
1869 PIXMAN_STD_FAST_PATH (SRC
, x8b8g8r8
, null
, x8b8g8r8
, fast_composite_src_memcpy
),
1870 PIXMAN_STD_FAST_PATH (SRC
, b8g8r8a8
, null
, b8g8r8x8
, fast_composite_src_memcpy
),
1871 PIXMAN_STD_FAST_PATH (SRC
, b8g8r8a8
, null
, b8g8r8a8
, fast_composite_src_memcpy
),
1872 PIXMAN_STD_FAST_PATH (SRC
, b8g8r8x8
, null
, b8g8r8x8
, fast_composite_src_memcpy
),
1873 PIXMAN_STD_FAST_PATH (SRC
, r5g6b5
, null
, r5g6b5
, fast_composite_src_memcpy
),
1874 PIXMAN_STD_FAST_PATH (SRC
, b5g6r5
, null
, b5g6r5
, fast_composite_src_memcpy
),
1875 PIXMAN_STD_FAST_PATH (SRC
, r8g8b8
, null
, r8g8b8
, fast_composite_src_memcpy
),
1876 PIXMAN_STD_FAST_PATH (SRC
, b8g8r8
, null
, b8g8r8
, fast_composite_src_memcpy
),
1877 PIXMAN_STD_FAST_PATH (SRC
, x1r5g5b5
, null
, x1r5g5b5
, fast_composite_src_memcpy
),
1878 PIXMAN_STD_FAST_PATH (SRC
, a1r5g5b5
, null
, x1r5g5b5
, fast_composite_src_memcpy
),
1879 PIXMAN_STD_FAST_PATH (SRC
, a8
, null
, a8
, fast_composite_src_memcpy
),
1880 PIXMAN_STD_FAST_PATH (IN
, a8
, null
, a8
, fast_composite_in_8_8
),
1881 PIXMAN_STD_FAST_PATH (IN
, solid
, a8
, a8
, fast_composite_in_n_8_8
),
1883 SIMPLE_NEAREST_FAST_PATH (SRC
, x8r8g8b8
, x8r8g8b8
, 8888_8888
),
1884 SIMPLE_NEAREST_FAST_PATH (SRC
, a8r8g8b8
, x8r8g8b8
, 8888_8888
),
1885 SIMPLE_NEAREST_FAST_PATH (SRC
, x8b8g8r8
, x8b8g8r8
, 8888_8888
),
1886 SIMPLE_NEAREST_FAST_PATH (SRC
, a8b8g8r8
, x8b8g8r8
, 8888_8888
),
1888 SIMPLE_NEAREST_FAST_PATH (SRC
, a8r8g8b8
, a8r8g8b8
, 8888_8888
),
1889 SIMPLE_NEAREST_FAST_PATH (SRC
, a8b8g8r8
, a8b8g8r8
, 8888_8888
),
1891 SIMPLE_NEAREST_FAST_PATH (SRC
, x8r8g8b8
, r5g6b5
, 8888_565
),
1892 SIMPLE_NEAREST_FAST_PATH (SRC
, a8r8g8b8
, r5g6b5
, 8888_565
),
1894 SIMPLE_NEAREST_FAST_PATH (SRC
, r5g6b5
, r5g6b5
, 565_565
),
1896 SIMPLE_NEAREST_FAST_PATH_COVER (SRC
, x8r8g8b8
, a8r8g8b8
, x888_8888
),
1897 SIMPLE_NEAREST_FAST_PATH_COVER (SRC
, x8b8g8r8
, a8b8g8r8
, x888_8888
),
1898 SIMPLE_NEAREST_FAST_PATH_PAD (SRC
, x8r8g8b8
, a8r8g8b8
, x888_8888
),
1899 SIMPLE_NEAREST_FAST_PATH_PAD (SRC
, x8b8g8r8
, a8b8g8r8
, x888_8888
),
1900 SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC
, x8r8g8b8
, a8r8g8b8
, x888_8888
),
1901 SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC
, x8b8g8r8
, a8b8g8r8
, x888_8888
),
1903 SIMPLE_NEAREST_FAST_PATH (OVER
, a8r8g8b8
, x8r8g8b8
, 8888_8888
),
1904 SIMPLE_NEAREST_FAST_PATH (OVER
, a8b8g8r8
, x8b8g8r8
, 8888_8888
),
1905 SIMPLE_NEAREST_FAST_PATH (OVER
, a8r8g8b8
, a8r8g8b8
, 8888_8888
),
1906 SIMPLE_NEAREST_FAST_PATH (OVER
, a8b8g8r8
, a8b8g8r8
, 8888_8888
),
1908 SIMPLE_NEAREST_FAST_PATH (OVER
, a8r8g8b8
, r5g6b5
, 8888_565
),
1910 #define NEAREST_FAST_PATH(op,s,d) \
1911 { PIXMAN_OP_ ## op, \
1912 PIXMAN_ ## s, SCALED_NEAREST_FLAGS, \
1914 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1915 fast_composite_scaled_nearest, \
1918 NEAREST_FAST_PATH (SRC
, x8r8g8b8
, x8r8g8b8
),
1919 NEAREST_FAST_PATH (SRC
, a8r8g8b8
, x8r8g8b8
),
1920 NEAREST_FAST_PATH (SRC
, x8b8g8r8
, x8b8g8r8
),
1921 NEAREST_FAST_PATH (SRC
, a8b8g8r8
, x8b8g8r8
),
1923 NEAREST_FAST_PATH (SRC
, x8r8g8b8
, a8r8g8b8
),
1924 NEAREST_FAST_PATH (SRC
, a8r8g8b8
, a8r8g8b8
),
1925 NEAREST_FAST_PATH (SRC
, x8b8g8r8
, a8b8g8r8
),
1926 NEAREST_FAST_PATH (SRC
, a8b8g8r8
, a8b8g8r8
),
1928 NEAREST_FAST_PATH (OVER
, x8r8g8b8
, x8r8g8b8
),
1929 NEAREST_FAST_PATH (OVER
, a8r8g8b8
, x8r8g8b8
),
1930 NEAREST_FAST_PATH (OVER
, x8b8g8r8
, x8b8g8r8
),
1931 NEAREST_FAST_PATH (OVER
, a8b8g8r8
, x8b8g8r8
),
1933 NEAREST_FAST_PATH (OVER
, x8r8g8b8
, a8r8g8b8
),
1934 NEAREST_FAST_PATH (OVER
, a8r8g8b8
, a8r8g8b8
),
1935 NEAREST_FAST_PATH (OVER
, x8b8g8r8
, a8b8g8r8
),
1936 NEAREST_FAST_PATH (OVER
, a8b8g8r8
, a8b8g8r8
),
1938 #define SIMPLE_ROTATE_FLAGS(angle) \
1939 (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM | \
1940 FAST_PATH_NEAREST_FILTER | \
1941 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST | \
1942 FAST_PATH_STANDARD_FLAGS)
1944 #define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix) \
1945 { PIXMAN_OP_ ## op, \
1946 PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90), \
1948 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1949 fast_composite_rotate_90_##suffix, \
1951 { PIXMAN_OP_ ## op, \
1952 PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270), \
1954 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1955 fast_composite_rotate_270_##suffix, \
1958 SIMPLE_ROTATE_FAST_PATH (SRC
, a8r8g8b8
, a8r8g8b8
, 8888),
1959 SIMPLE_ROTATE_FAST_PATH (SRC
, a8r8g8b8
, x8r8g8b8
, 8888),
1960 SIMPLE_ROTATE_FAST_PATH (SRC
, x8r8g8b8
, x8r8g8b8
, 8888),
1961 SIMPLE_ROTATE_FAST_PATH (SRC
, r5g6b5
, r5g6b5
, 565),
1962 SIMPLE_ROTATE_FAST_PATH (SRC
, a8
, a8
, 8),
1964 /* Simple repeat fast path entry. */
1967 (FAST_PATH_STANDARD_FLAGS
| FAST_PATH_ID_TRANSFORM
| FAST_PATH_BITS_IMAGE
|
1968 FAST_PATH_NORMAL_REPEAT
),
1970 PIXMAN_any
, FAST_PATH_STD_DEST_FLAGS
,
1971 fast_composite_tiled_repeat
1977 #ifdef WORDS_BIGENDIAN
1978 #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n)))
1980 #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))
1983 static force_inline
void
1984 pixman_fill1_line (uint32_t *dst
, int offs
, int width
, int v
)
1988 int leading_pixels
= 32 - offs
;
1989 if (leading_pixels
>= width
)
1992 *dst
|= A1_FILL_MASK (width
, offs
);
1994 *dst
&= ~A1_FILL_MASK (width
, offs
);
2000 *dst
++ |= A1_FILL_MASK (leading_pixels
, offs
);
2002 *dst
++ &= ~A1_FILL_MASK (leading_pixels
, offs
);
2003 width
-= leading_pixels
;
2009 *dst
++ = 0xFFFFFFFF;
2017 *dst
|= A1_FILL_MASK (width
, 0);
2019 *dst
&= ~A1_FILL_MASK (width
, 0);
2024 pixman_fill1 (uint32_t *bits
,
2032 uint32_t *dst
= bits
+ y
* stride
+ (x
>> 5);
2039 pixman_fill1_line (dst
, offs
, width
, 1);
2047 pixman_fill1_line (dst
, offs
, width
, 0);
2054 pixman_fill8 (uint32_t *bits
,
2062 int byte_stride
= stride
* (int) sizeof (uint32_t);
2063 uint8_t *dst
= (uint8_t *) bits
;
2064 uint8_t v
= filler
& 0xff;
2067 dst
= dst
+ y
* byte_stride
+ x
;
2071 for (i
= 0; i
< width
; ++i
)
2079 pixman_fill16 (uint32_t *bits
,
2088 (stride
* (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
2089 uint16_t *dst
= (uint16_t *)bits
;
2090 uint16_t v
= filler
& 0xffff;
2093 dst
= dst
+ y
* short_stride
+ x
;
2097 for (i
= 0; i
< width
; ++i
)
2100 dst
+= short_stride
;
2105 pixman_fill32 (uint32_t *bits
,
2115 bits
= bits
+ y
* stride
+ x
;
2119 for (i
= 0; i
< width
; ++i
)
2126 static pixman_bool_t
2127 fast_path_fill (pixman_implementation_t
*imp
,
2140 pixman_fill1 (bits
, stride
, x
, y
, width
, height
, filler
);
2144 pixman_fill8 (bits
, stride
, x
, y
, width
, height
, filler
);
2148 pixman_fill16 (bits
, stride
, x
, y
, width
, height
, filler
);
2152 pixman_fill32 (bits
, stride
, x
, y
, width
, height
, filler
);
2162 /*****************************************************************************/
2165 fast_fetch_r5g6b5 (pixman_iter_t
*iter
, const uint32_t *mask
)
2167 int32_t w
= iter
->width
;
2168 uint32_t *dst
= iter
->buffer
;
2169 const uint16_t *src
= (const uint16_t *)iter
->bits
;
2171 iter
->bits
+= iter
->stride
;
2173 /* Align the source buffer at 4 bytes boundary */
2174 if (w
> 0 && ((uintptr_t)src
& 3))
2176 *dst
++ = convert_0565_to_8888 (*src
++);
2179 /* Process two pixels per iteration */
2180 while ((w
-= 2) >= 0)
2182 uint32_t sr
, sb
, sg
, t0
, t1
;
2183 uint32_t s
= *(const uint32_t *)src
;
2185 sr
= (s
>> 8) & 0x00F800F8;
2186 sb
= (s
<< 3) & 0x00F800F8;
2187 sg
= (s
>> 3) & 0x00FC00FC;
2191 t0
= ((sr
<< 16) & 0x00FF0000) | ((sg
<< 8) & 0x0000FF00) |
2192 (sb
& 0xFF) | 0xFF000000;
2193 t1
= (sr
& 0x00FF0000) | ((sg
>> 8) & 0x0000FF00) |
2194 (sb
>> 16) | 0xFF000000;
2195 #ifdef WORDS_BIGENDIAN
2205 *dst
= convert_0565_to_8888 (*src
);
2208 return iter
->buffer
;
2212 fast_dest_fetch_noop (pixman_iter_t
*iter
, const uint32_t *mask
)
2214 iter
->bits
+= iter
->stride
;
2215 return iter
->buffer
;
2218 /* Helper function for a workaround, which tries to ensure that 0x1F001F
2219 * constant is always allocated in a register on RISC architectures.
2221 static force_inline
uint32_t
2222 convert_8888_to_0565_workaround (uint32_t s
, uint32_t x1F001F
)
2225 a
= (s
>> 3) & x1F001F
;
2233 fast_write_back_r5g6b5 (pixman_iter_t
*iter
)
2235 int32_t w
= iter
->width
;
2236 uint16_t *dst
= (uint16_t *)(iter
->bits
- iter
->stride
);
2237 const uint32_t *src
= iter
->buffer
;
2238 /* Workaround to ensure that x1F001F variable is allocated in a register */
2239 static volatile uint32_t volatile_x1F001F
= 0x1F001F;
2240 uint32_t x1F001F
= volatile_x1F001F
;
2242 while ((w
-= 4) >= 0)
2244 uint32_t s1
= *src
++;
2245 uint32_t s2
= *src
++;
2246 uint32_t s3
= *src
++;
2247 uint32_t s4
= *src
++;
2248 *dst
++ = convert_8888_to_0565_workaround (s1
, x1F001F
);
2249 *dst
++ = convert_8888_to_0565_workaround (s2
, x1F001F
);
2250 *dst
++ = convert_8888_to_0565_workaround (s3
, x1F001F
);
2251 *dst
++ = convert_8888_to_0565_workaround (s4
, x1F001F
);
2255 *dst
++ = convert_8888_to_0565_workaround (*src
++, x1F001F
);
2256 *dst
++ = convert_8888_to_0565_workaround (*src
++, x1F001F
);
2260 *dst
= convert_8888_to_0565_workaround (*src
, x1F001F
);
2279 fetch_horizontal (bits_image_t
*image
, line_t
*line
,
2280 int y
, pixman_fixed_t x
, pixman_fixed_t ux
, int n
)
2282 uint32_t *bits
= image
->bits
+ y
* image
->rowstride
;
2285 for (i
= 0; i
< n
; ++i
)
2287 int x0
= pixman_fixed_to_int (x
);
2291 uint32_t left
= *(bits
+ x0
);
2292 uint32_t right
= *(bits
+ x1
);
2294 dist_x
= pixman_fixed_to_bilinear_weight (x
);
2295 dist_x
<<= (8 - BILINEAR_INTERPOLATION_BITS
);
2297 #if SIZEOF_LONG <= 4
2299 uint32_t lag
, rag
, ag
;
2300 uint32_t lrb
, rrb
, rb
;
2302 lag
= (left
& 0xff00ff00) >> 8;
2303 rag
= (right
& 0xff00ff00) >> 8;
2304 ag
= (lag
<< 8) + dist_x
* (rag
- lag
);
2306 lrb
= (left
& 0x00ff00ff);
2307 rrb
= (right
& 0x00ff00ff);
2308 rb
= (lrb
<< 8) + dist_x
* (rrb
- lrb
);
2310 *((uint32_t *)(line
->buffer
+ i
)) = ag
;
2311 *((uint32_t *)(line
->buffer
+ i
) + 1) = rb
;
2315 uint64_t lagrb
, ragrb
;
2319 lag
= (left
& 0xff00ff00);
2320 lrb
= (left
& 0x00ff00ff);
2321 rag
= (right
& 0xff00ff00);
2322 rrb
= (right
& 0x00ff00ff);
2323 lagrb
= (((uint64_t)lag
) << 24) | lrb
;
2324 ragrb
= (((uint64_t)rag
) << 24) | rrb
;
2326 line
->buffer
[i
] = (lagrb
<< 8) + dist_x
* (ragrb
- lagrb
);
2337 fast_fetch_bilinear_cover (pixman_iter_t
*iter
, const uint32_t *mask
)
2339 pixman_fixed_t fx
, ux
;
2340 bilinear_info_t
*info
= iter
->data
;
2341 line_t
*line0
, *line1
;
2346 COMPILE_TIME_ASSERT (BILINEAR_INTERPOLATION_BITS
< 8);
2349 ux
= iter
->image
->common
.transform
->matrix
[0][0];
2351 y0
= pixman_fixed_to_int (info
->y
);
2353 dist_y
= pixman_fixed_to_bilinear_weight (info
->y
);
2354 dist_y
<<= (8 - BILINEAR_INTERPOLATION_BITS
);
2356 line0
= &info
->lines
[y0
& 0x01];
2357 line1
= &info
->lines
[y1
& 0x01];
2362 &iter
->image
->bits
, line0
, y0
, fx
, ux
, iter
->width
);
2368 &iter
->image
->bits
, line1
, y1
, fx
, ux
, iter
->width
);
2371 for (i
= 0; i
< iter
->width
; ++i
)
2373 #if SIZEOF_LONG <= 4
2374 uint32_t ta
, tr
, tg
, tb
;
2375 uint32_t ba
, br
, bg
, bb
;
2378 uint32_t a
, r
, g
, b
;
2380 tag
= *((uint32_t *)(line0
->buffer
+ i
));
2381 trb
= *((uint32_t *)(line0
->buffer
+ i
) + 1);
2382 bag
= *((uint32_t *)(line1
->buffer
+ i
));
2383 brb
= *((uint32_t *)(line1
->buffer
+ i
) + 1);
2387 a
= (ta
<< 8) + dist_y
* (ba
- ta
);
2391 r
= (tr
<< 8) + dist_y
* (br
- tr
);
2395 g
= (tg
<< 8) + dist_y
* (bg
- tg
);
2399 b
= (tb
<< 8) + dist_y
* (bb
- tb
);
2401 a
= (a
<< 8) & 0xff000000;
2402 r
= (r
<< 0) & 0x00ff0000;
2403 g
= (g
>> 8) & 0x0000ff00;
2404 b
= (b
>> 16) & 0x000000ff;
2406 uint64_t top
= line0
->buffer
[i
];
2407 uint64_t bot
= line1
->buffer
[i
];
2408 uint64_t tar
= (top
& 0xffff0000ffff0000ULL
) >> 16;
2409 uint64_t bar
= (bot
& 0xffff0000ffff0000ULL
) >> 16;
2410 uint64_t tgb
= (top
& 0x0000ffff0000ffffULL
);
2411 uint64_t bgb
= (bot
& 0x0000ffff0000ffffULL
);
2413 uint32_t a
, r
, g
, b
;
2415 ar
= (tar
<< 8) + dist_y
* (bar
- tar
);
2416 gb
= (tgb
<< 8) + dist_y
* (bgb
- tgb
);
2418 a
= ((ar
>> 24) & 0xff000000);
2419 r
= ((ar
>> 0) & 0x00ff0000);
2420 g
= ((gb
>> 40) & 0x0000ff00);
2421 b
= ((gb
>> 16) & 0x000000ff);
2424 iter
->buffer
[i
] = a
| r
| g
| b
;
2427 info
->y
+= iter
->image
->common
.transform
->matrix
[1][1];
2429 return iter
->buffer
;
2433 bilinear_cover_iter_fini (pixman_iter_t
*iter
)
2439 fast_bilinear_cover_iter_init (pixman_iter_t
*iter
, const pixman_iter_info_t
*iter_info
)
2441 int width
= iter
->width
;
2442 bilinear_info_t
*info
;
2445 /* Reference point is the center of the pixel */
2446 v
.vector
[0] = pixman_int_to_fixed (iter
->x
) + pixman_fixed_1
/ 2;
2447 v
.vector
[1] = pixman_int_to_fixed (iter
->y
) + pixman_fixed_1
/ 2;
2448 v
.vector
[2] = pixman_fixed_1
;
2450 if (!pixman_transform_point_3d (iter
->image
->common
.transform
, &v
))
2453 info
= malloc (sizeof (*info
) + (2 * width
- 1) * sizeof (uint64_t));
2457 info
->x
= v
.vector
[0] - pixman_fixed_1
/ 2;
2458 info
->y
= v
.vector
[1] - pixman_fixed_1
/ 2;
2460 /* It is safe to set the y coordinates to -1 initially
2461 * because COVER_CLIP_BILINEAR ensures that we will only
2462 * be asked to fetch lines in the [0, height) interval
2464 info
->lines
[0].y
= -1;
2465 info
->lines
[0].buffer
= &(info
->data
[0]);
2466 info
->lines
[1].y
= -1;
2467 info
->lines
[1].buffer
= &(info
->data
[width
]);
2469 iter
->get_scanline
= fast_fetch_bilinear_cover
;
2470 iter
->fini
= bilinear_cover_iter_fini
;
2476 /* Something went wrong, either a bad matrix or OOM; in such cases,
2477 * we don't guarantee any particular rendering.
2480 FUNC
, "Allocation failure or bad matrix, skipping rendering\n");
2482 iter
->get_scanline
= _pixman_iter_get_scanline_noop
;
2487 bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t
*iter
,
2488 const uint32_t *mask
)
2491 pixman_image_t
* ima
= iter
->image
;
2492 int offset
= iter
->x
;
2493 int line
= iter
->y
++;
2494 int width
= iter
->width
;
2495 uint32_t * buffer
= iter
->buffer
;
2497 bits_image_t
*bits
= &ima
->bits
;
2498 pixman_fixed_t x_top
, x_bottom
, x
;
2499 pixman_fixed_t ux_top
, ux_bottom
, ux
;
2501 uint32_t top_mask
, bottom_mask
;
2503 uint32_t *bottom_row
;
2505 uint32_t zero
[2] = { 0, 0 };
2512 /* reference point is the center of the pixel */
2513 v
.vector
[0] = pixman_int_to_fixed (offset
) + pixman_fixed_1
/ 2;
2514 v
.vector
[1] = pixman_int_to_fixed (line
) + pixman_fixed_1
/ 2;
2515 v
.vector
[2] = pixman_fixed_1
;
2517 if (!pixman_transform_point_3d (bits
->common
.transform
, &v
))
2518 return iter
->buffer
;
2520 ux
= ux_top
= ux_bottom
= bits
->common
.transform
->matrix
[0][0];
2521 x
= x_top
= x_bottom
= v
.vector
[0] - pixman_fixed_1
/2;
2523 y
= v
.vector
[1] - pixman_fixed_1
/2;
2524 disty
= pixman_fixed_to_bilinear_weight (y
);
2526 /* Load the pointers to the first and second lines from the source
2527 * image that bilinear code must read.
2529 * The main trick in this code is about the check if any line are
2530 * outside of the image;
2532 * When I realize that a line (any one) is outside, I change
2533 * the pointer to a dummy area with zeros. Once I change this, I
2534 * must be sure the pointer will not change, so I set the
2535 * variables to each pointer increments inside the loop.
2537 y1
= pixman_fixed_to_int (y
);
2540 if (y1
< 0 || y1
>= bits
->height
)
2548 top_row
= bits
->bits
+ y1
* bits
->rowstride
;
2553 if (y2
< 0 || y2
>= bits
->height
)
2561 bottom_row
= bits
->bits
+ y2
* bits
->rowstride
;
2566 /* Instead of checking whether the operation uses the mast in
2567 * each loop iteration, verify this only once and prepare the
2568 * variables to make the code smaller inside the loop.
2577 /* If have a mask, prepare the variables to check it */
2581 /* If both are zero, then the whole thing is zero */
2582 if (top_row
== zero
&& bottom_row
== zero
)
2584 memset (buffer
, 0, width
* sizeof (uint32_t));
2585 return iter
->buffer
;
2587 else if (bits
->format
== PIXMAN_x8r8g8b8
)
2589 if (top_row
== zero
)
2592 bottom_mask
= 0xff000000;
2594 else if (bottom_row
== zero
)
2596 top_mask
= 0xff000000;
2601 top_mask
= 0xff000000;
2602 bottom_mask
= 0xff000000;
2611 end
= buffer
+ width
;
2613 /* Zero fill to the left of the image */
2614 while (buffer
< end
&& x
< pixman_fixed_minus_1
)
2619 x_bottom
+= ux_bottom
;
2625 while (buffer
< end
&& x
< 0)
2630 tr
= top_row
[pixman_fixed_to_int (x_top
) + 1] | top_mask
;
2631 br
= bottom_row
[pixman_fixed_to_int (x_bottom
) + 1] | bottom_mask
;
2633 distx
= pixman_fixed_to_bilinear_weight (x
);
2635 *buffer
++ = bilinear_interpolation (0, tr
, 0, br
, distx
, disty
);
2639 x_bottom
+= ux_bottom
;
2644 w
= pixman_int_to_fixed (bits
->width
- 1);
2646 while (buffer
< end
&& x
< w
)
2650 uint32_t tl
, tr
, bl
, br
;
2653 tl
= top_row
[pixman_fixed_to_int (x_top
)] | top_mask
;
2654 tr
= top_row
[pixman_fixed_to_int (x_top
) + 1] | top_mask
;
2655 bl
= bottom_row
[pixman_fixed_to_int (x_bottom
)] | bottom_mask
;
2656 br
= bottom_row
[pixman_fixed_to_int (x_bottom
) + 1] | bottom_mask
;
2658 distx
= pixman_fixed_to_bilinear_weight (x
);
2660 *buffer
= bilinear_interpolation (tl
, tr
, bl
, br
, distx
, disty
);
2666 x_bottom
+= ux_bottom
;
2671 w
= pixman_int_to_fixed (bits
->width
);
2672 while (buffer
< end
&& x
< w
)
2679 tl
= top_row
[pixman_fixed_to_int (x_top
)] | top_mask
;
2680 bl
= bottom_row
[pixman_fixed_to_int (x_bottom
)] | bottom_mask
;
2682 distx
= pixman_fixed_to_bilinear_weight (x
);
2684 *buffer
= bilinear_interpolation (tl
, 0, bl
, 0, distx
, disty
);
2690 x_bottom
+= ux_bottom
;
2694 /* Zero fill to the left of the image */
2695 while (buffer
< end
)
2698 return iter
->buffer
;
/* Reads pixel x of 'row' and returns it widened to a 32-bit value in
 * a8r8g8b8 channel order (see convert_a8r8g8b8 and friends below).
 */
typedef uint32_t (* convert_pixel_t) (const uint8_t *row, int x);
2703 static force_inline
void
2704 bits_image_fetch_separable_convolution_affine (pixman_image_t
* image
,
2709 const uint32_t * mask
,
2711 convert_pixel_t convert_pixel
,
2712 pixman_format_code_t format
,
2713 pixman_repeat_t repeat_mode
)
2715 bits_image_t
*bits
= &image
->bits
;
2716 pixman_fixed_t
*params
= image
->common
.filter_params
;
2717 int cwidth
= pixman_fixed_to_int (params
[0]);
2718 int cheight
= pixman_fixed_to_int (params
[1]);
2719 int x_off
= ((cwidth
<< 16) - pixman_fixed_1
) >> 1;
2720 int y_off
= ((cheight
<< 16) - pixman_fixed_1
) >> 1;
2721 int x_phase_bits
= pixman_fixed_to_int (params
[2]);
2722 int y_phase_bits
= pixman_fixed_to_int (params
[3]);
2723 int x_phase_shift
= 16 - x_phase_bits
;
2724 int y_phase_shift
= 16 - y_phase_bits
;
2725 pixman_fixed_t vx
, vy
;
2726 pixman_fixed_t ux
, uy
;
2730 /* reference point is the center of the pixel */
2731 v
.vector
[0] = pixman_int_to_fixed (offset
) + pixman_fixed_1
/ 2;
2732 v
.vector
[1] = pixman_int_to_fixed (line
) + pixman_fixed_1
/ 2;
2733 v
.vector
[2] = pixman_fixed_1
;
2735 if (!pixman_transform_point_3d (image
->common
.transform
, &v
))
2738 ux
= image
->common
.transform
->matrix
[0][0];
2739 uy
= image
->common
.transform
->matrix
[1][0];
2744 for (k
= 0; k
< width
; ++k
)
2746 pixman_fixed_t
*y_params
;
2747 int satot
, srtot
, sgtot
, sbtot
;
2748 pixman_fixed_t x
, y
;
2749 int32_t x1
, x2
, y1
, y2
;
2753 if (mask
&& !mask
[k
])
2756 /* Round x and y to the middle of the closest phase before continuing. This
2757 * ensures that the convolution matrix is aligned right, since it was
2758 * positioned relative to a particular phase (and not relative to whatever
2759 * exact fraction we happen to get here).
2761 x
= ((vx
>> x_phase_shift
) << x_phase_shift
) + ((1 << x_phase_shift
) >> 1);
2762 y
= ((vy
>> y_phase_shift
) << y_phase_shift
) + ((1 << y_phase_shift
) >> 1);
2764 px
= (x
& 0xffff) >> x_phase_shift
;
2765 py
= (y
& 0xffff) >> y_phase_shift
;
2767 x1
= pixman_fixed_to_int (x
- pixman_fixed_e
- x_off
);
2768 y1
= pixman_fixed_to_int (y
- pixman_fixed_e
- y_off
);
2772 satot
= srtot
= sgtot
= sbtot
= 0;
2774 y_params
= params
+ 4 + (1 << x_phase_bits
) * cwidth
+ py
* cheight
;
2776 for (i
= y1
; i
< y2
; ++i
)
2778 pixman_fixed_t fy
= *y_params
++;
2782 pixman_fixed_t
*x_params
= params
+ 4 + px
* cwidth
;
2784 for (j
= x1
; j
< x2
; ++j
)
2786 pixman_fixed_t fx
= *x_params
++;
2793 uint32_t pixel
, mask
;
2796 mask
= PIXMAN_FORMAT_A (format
)? 0 : 0xff000000;
2798 if (repeat_mode
!= PIXMAN_REPEAT_NONE
)
2800 repeat (repeat_mode
, &rx
, bits
->width
);
2801 repeat (repeat_mode
, &ry
, bits
->height
);
2803 row
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * ry
;
2804 pixel
= convert_pixel (row
, rx
) | mask
;
2808 if (rx
< 0 || ry
< 0 || rx
>= bits
->width
|| ry
>= bits
->height
)
2814 row
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * ry
;
2815 pixel
= convert_pixel (row
, rx
) | mask
;
2819 f
= ((pixman_fixed_32_32_t
)fx
* fy
+ 0x8000) >> 16;
2820 srtot
+= (int)RED_8 (pixel
) * f
;
2821 sgtot
+= (int)GREEN_8 (pixel
) * f
;
2822 sbtot
+= (int)BLUE_8 (pixel
) * f
;
2823 satot
+= (int)ALPHA_8 (pixel
) * f
;
2829 satot
= (satot
+ 0x8000) >> 16;
2830 srtot
= (srtot
+ 0x8000) >> 16;
2831 sgtot
= (sgtot
+ 0x8000) >> 16;
2832 sbtot
= (sbtot
+ 0x8000) >> 16;
2834 satot
= CLIP (satot
, 0, 0xff);
2835 srtot
= CLIP (srtot
, 0, 0xff);
2836 sgtot
= CLIP (sgtot
, 0, 0xff);
2837 sbtot
= CLIP (sbtot
, 0, 0xff);
2839 buffer
[k
] = (satot
<< 24) | (srtot
<< 16) | (sgtot
<< 8) | (sbtot
<< 0);
/* Eight bytes of zeros; presumably serves as a safe dummy row for
 * out-of-bounds fetches in the REPEAT_NONE paths below — verify at the
 * use sites (NOTE: distinct from the local zero[2] used earlier).
 */
static const uint8_t zero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
2849 static force_inline
void
2850 bits_image_fetch_bilinear_affine (pixman_image_t
* image
,
2855 const uint32_t * mask
,
2857 convert_pixel_t convert_pixel
,
2858 pixman_format_code_t format
,
2859 pixman_repeat_t repeat_mode
)
2861 pixman_fixed_t x
, y
;
2862 pixman_fixed_t ux
, uy
;
2864 bits_image_t
*bits
= &image
->bits
;
2867 /* reference point is the center of the pixel */
2868 v
.vector
[0] = pixman_int_to_fixed (offset
) + pixman_fixed_1
/ 2;
2869 v
.vector
[1] = pixman_int_to_fixed (line
) + pixman_fixed_1
/ 2;
2870 v
.vector
[2] = pixman_fixed_1
;
2872 if (!pixman_transform_point_3d (image
->common
.transform
, &v
))
2875 ux
= image
->common
.transform
->matrix
[0][0];
2876 uy
= image
->common
.transform
->matrix
[1][0];
2881 for (i
= 0; i
< width
; ++i
)
2884 uint32_t tl
, tr
, bl
, br
;
2885 int32_t distx
, disty
;
2886 int width
= image
->bits
.width
;
2887 int height
= image
->bits
.height
;
2888 const uint8_t *row1
;
2889 const uint8_t *row2
;
2891 if (mask
&& !mask
[i
])
2894 x1
= x
- pixman_fixed_1
/ 2;
2895 y1
= y
- pixman_fixed_1
/ 2;
2897 distx
= pixman_fixed_to_bilinear_weight (x1
);
2898 disty
= pixman_fixed_to_bilinear_weight (y1
);
2900 y1
= pixman_fixed_to_int (y1
);
2902 x1
= pixman_fixed_to_int (x1
);
2905 if (repeat_mode
!= PIXMAN_REPEAT_NONE
)
2909 mask
= PIXMAN_FORMAT_A (format
)? 0 : 0xff000000;
2911 repeat (repeat_mode
, &x1
, width
);
2912 repeat (repeat_mode
, &y1
, height
);
2913 repeat (repeat_mode
, &x2
, width
);
2914 repeat (repeat_mode
, &y2
, height
);
2916 row1
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * y1
;
2917 row2
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * y2
;
2919 tl
= convert_pixel (row1
, x1
) | mask
;
2920 tr
= convert_pixel (row1
, x2
) | mask
;
2921 bl
= convert_pixel (row2
, x1
) | mask
;
2922 br
= convert_pixel (row2
, x2
) | mask
;
2926 uint32_t mask1
, mask2
;
2929 /* Note: PIXMAN_FORMAT_BPP() returns an unsigned value,
2930 * which means if you use it in expressions, those
2931 * expressions become unsigned themselves. Since
2932 * the variables below can be negative in some cases,
2933 * that will lead to crashes on 64 bit architectures.
2935 * So this line makes sure bpp is signed
2937 bpp
= PIXMAN_FORMAT_BPP (format
);
2939 if (x1
>= width
|| x2
< 0 || y1
>= height
|| y2
< 0)
2952 row1
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * y1
;
2953 row1
+= bpp
/ 8 * x1
;
2955 mask1
= PIXMAN_FORMAT_A (format
)? 0 : 0xff000000;
2958 if (y1
== height
- 1)
2965 row2
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * y2
;
2966 row2
+= bpp
/ 8 * x1
;
2968 mask2
= PIXMAN_FORMAT_A (format
)? 0 : 0xff000000;
2978 tl
= convert_pixel (row1
, 0) | mask1
;
2979 bl
= convert_pixel (row2
, 0) | mask2
;
2982 if (x1
== width
- 1)
2989 tr
= convert_pixel (row1
, 1) | mask1
;
2990 br
= convert_pixel (row2
, 1) | mask2
;
2994 buffer
[i
] = bilinear_interpolation (
2995 tl
, tr
, bl
, br
, distx
, disty
);
3003 static force_inline
void
3004 bits_image_fetch_nearest_affine (pixman_image_t
* image
,
3009 const uint32_t * mask
,
3011 convert_pixel_t convert_pixel
,
3012 pixman_format_code_t format
,
3013 pixman_repeat_t repeat_mode
)
3015 pixman_fixed_t x
, y
;
3016 pixman_fixed_t ux
, uy
;
3018 bits_image_t
*bits
= &image
->bits
;
3021 /* reference point is the center of the pixel */
3022 v
.vector
[0] = pixman_int_to_fixed (offset
) + pixman_fixed_1
/ 2;
3023 v
.vector
[1] = pixman_int_to_fixed (line
) + pixman_fixed_1
/ 2;
3024 v
.vector
[2] = pixman_fixed_1
;
3026 if (!pixman_transform_point_3d (image
->common
.transform
, &v
))
3029 ux
= image
->common
.transform
->matrix
[0][0];
3030 uy
= image
->common
.transform
->matrix
[1][0];
3035 for (i
= 0; i
< width
; ++i
)
3037 int width
, height
, x0
, y0
;
3040 if (mask
&& !mask
[i
])
3043 width
= image
->bits
.width
;
3044 height
= image
->bits
.height
;
3045 x0
= pixman_fixed_to_int (x
- pixman_fixed_e
);
3046 y0
= pixman_fixed_to_int (y
- pixman_fixed_e
);
3048 if (repeat_mode
== PIXMAN_REPEAT_NONE
&&
3049 (y0
< 0 || y0
>= height
|| x0
< 0 || x0
>= width
))
3055 uint32_t mask
= PIXMAN_FORMAT_A (format
)? 0 : 0xff000000;
3057 if (repeat_mode
!= PIXMAN_REPEAT_NONE
)
3059 repeat (repeat_mode
, &x0
, width
);
3060 repeat (repeat_mode
, &y0
, height
);
3063 row
= (uint8_t *)bits
->bits
+ bits
->rowstride
* 4 * y0
;
3065 buffer
[i
] = convert_pixel (row
, x0
) | mask
;
3074 static force_inline
uint32_t
3075 convert_a8r8g8b8 (const uint8_t *row
, int x
)
3077 return *(((uint32_t *)row
) + x
);
3080 static force_inline
uint32_t
3081 convert_x8r8g8b8 (const uint8_t *row
, int x
)
3083 return *(((uint32_t *)row
) + x
);
3086 static force_inline
uint32_t
3087 convert_a8 (const uint8_t *row
, int x
)
3089 return *(row
+ x
) << 24;
3092 static force_inline
uint32_t
3093 convert_r5g6b5 (const uint8_t *row
, int x
)
3095 return convert_0565_to_0888 (*((uint16_t *)row
+ x
));
3098 #define MAKE_SEPARABLE_CONVOLUTION_FETCHER(name, format, repeat_mode) \
3100 bits_image_fetch_separable_convolution_affine_ ## name (pixman_iter_t *iter, \
3101 const uint32_t * mask) \
3103 bits_image_fetch_separable_convolution_affine ( \
3105 iter->x, iter->y++, \
3107 iter->buffer, mask, \
3108 convert_ ## format, \
3109 PIXMAN_ ## format, \
3112 return iter->buffer; \
3115 #define MAKE_BILINEAR_FETCHER(name, format, repeat_mode) \
3117 bits_image_fetch_bilinear_affine_ ## name (pixman_iter_t *iter, \
3118 const uint32_t * mask) \
3120 bits_image_fetch_bilinear_affine (iter->image, \
3121 iter->x, iter->y++, \
3123 iter->buffer, mask, \
3124 convert_ ## format, \
3125 PIXMAN_ ## format, \
3127 return iter->buffer; \
3130 #define MAKE_NEAREST_FETCHER(name, format, repeat_mode) \
3132 bits_image_fetch_nearest_affine_ ## name (pixman_iter_t *iter, \
3133 const uint32_t * mask) \
3135 bits_image_fetch_nearest_affine (iter->image, \
3136 iter->x, iter->y++, \
3138 iter->buffer, mask, \
3139 convert_ ## format, \
3140 PIXMAN_ ## format, \
3142 return iter->buffer; \
/* Expand to the nearest, bilinear and separable-convolution affine
 * fetchers for one (name, format, repeat_mode) combination.
 */
#define MAKE_FETCHERS(name, format, repeat_mode)			\
    MAKE_NEAREST_FETCHER (name, format, repeat_mode)			\
    MAKE_BILINEAR_FETCHER (name, format, repeat_mode)			\
    MAKE_SEPARABLE_CONVOLUTION_FETCHER (name, format, repeat_mode)
/* Generate the affine fetcher triples for every supported
 * (repeat mode, pixel format) combination.
 */
MAKE_FETCHERS (pad_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_PAD)
MAKE_FETCHERS (none_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NONE)
MAKE_FETCHERS (reflect_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_REFLECT)
MAKE_FETCHERS (normal_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NORMAL)
MAKE_FETCHERS (pad_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_PAD)
MAKE_FETCHERS (none_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NONE)
MAKE_FETCHERS (reflect_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_REFLECT)
MAKE_FETCHERS (normal_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NORMAL)
MAKE_FETCHERS (pad_a8, a8, PIXMAN_REPEAT_PAD)
MAKE_FETCHERS (none_a8, a8, PIXMAN_REPEAT_NONE)
MAKE_FETCHERS (reflect_a8, a8, PIXMAN_REPEAT_REFLECT)
MAKE_FETCHERS (normal_a8, a8, PIXMAN_REPEAT_NORMAL)
MAKE_FETCHERS (pad_r5g6b5, r5g6b5, PIXMAN_REPEAT_PAD)
MAKE_FETCHERS (none_r5g6b5, r5g6b5, PIXMAN_REPEAT_NONE)
MAKE_FETCHERS (reflect_r5g6b5, r5g6b5, PIXMAN_REPEAT_REFLECT)
MAKE_FETCHERS (normal_r5g6b5, r5g6b5, PIXMAN_REPEAT_NORMAL)
/* Flags an untransformed bits image must satisfy for the plain r5g6b5
 * source iterator in fast_iters below to apply.
 */
#define IMAGE_FLAGS							\
    (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM |		\
     FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
3171 static const pixman_iter_info_t fast_iters
[] =
3173 { PIXMAN_r5g6b5
, IMAGE_FLAGS
, ITER_NARROW
| ITER_SRC
,
3174 _pixman_iter_init_bits_stride
, fast_fetch_r5g6b5
, NULL
},
3176 { PIXMAN_r5g6b5
, FAST_PATH_STD_DEST_FLAGS
,
3177 ITER_NARROW
| ITER_DEST
,
3178 _pixman_iter_init_bits_stride
,
3179 fast_fetch_r5g6b5
, fast_write_back_r5g6b5
},
3181 { PIXMAN_r5g6b5
, FAST_PATH_STD_DEST_FLAGS
,
3182 ITER_NARROW
| ITER_DEST
| ITER_IGNORE_RGB
| ITER_IGNORE_ALPHA
,
3183 _pixman_iter_init_bits_stride
,
3184 fast_dest_fetch_noop
, fast_write_back_r5g6b5
},
3187 (FAST_PATH_STANDARD_FLAGS
|
3188 FAST_PATH_SCALE_TRANSFORM
|
3189 FAST_PATH_BILINEAR_FILTER
|
3190 FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR
),
3191 ITER_NARROW
| ITER_SRC
,
3192 fast_bilinear_cover_iter_init
,
/* Requirements for bits_image_fetch_bilinear_no_repeat_8888: an affine
 * bilinear transform that only moves horizontally (y unit zero, x unit
 * positive) with NONE repeat, and no alpha map or accessors.
 */
#define FAST_BILINEAR_FLAGS						\
    (FAST_PATH_NO_ALPHA_MAP |						\
     FAST_PATH_NO_ACCESSORS |						\
     FAST_PATH_HAS_TRANSFORM |						\
     FAST_PATH_AFFINE_TRANSFORM |					\
     FAST_PATH_X_UNIT_POSITIVE |					\
     FAST_PATH_Y_UNIT_ZERO |						\
     FAST_PATH_NONE_REPEAT |						\
     FAST_PATH_BILINEAR_FILTER)
3207 FAST_BILINEAR_FLAGS
,
3208 ITER_NARROW
| ITER_SRC
,
3209 NULL
, bits_image_fetch_bilinear_no_repeat_8888
, NULL
3213 FAST_BILINEAR_FLAGS
,
3214 ITER_NARROW
| ITER_SRC
,
3215 NULL
, bits_image_fetch_bilinear_no_repeat_8888
, NULL
/* Common requirements for the affine fetchers generated by
 * MAKE_FETCHERS above -- one flag set per filter type.  Unlike
 * FAST_BILINEAR_FLAGS these allow any affine transform and any repeat
 * (the repeat is selected per table entry below).
 */
#define GENERAL_BILINEAR_FLAGS						\
    (FAST_PATH_NO_ALPHA_MAP |						\
     FAST_PATH_NO_ACCESSORS |						\
     FAST_PATH_HAS_TRANSFORM |						\
     FAST_PATH_AFFINE_TRANSFORM |					\
     FAST_PATH_BILINEAR_FILTER)

#define GENERAL_NEAREST_FLAGS						\
    (FAST_PATH_NO_ALPHA_MAP |						\
     FAST_PATH_NO_ACCESSORS |						\
     FAST_PATH_HAS_TRANSFORM |						\
     FAST_PATH_AFFINE_TRANSFORM |					\
     FAST_PATH_NEAREST_FILTER)

#define GENERAL_SEPARABLE_CONVOLUTION_FLAGS				\
    (FAST_PATH_NO_ALPHA_MAP |						\
     FAST_PATH_NO_ACCESSORS |						\
     FAST_PATH_HAS_TRANSFORM |						\
     FAST_PATH_AFFINE_TRANSFORM |					\
     FAST_PATH_SEPARABLE_CONVOLUTION_FILTER)
/* Each *_FAST_PATH macro expands to one pixman_iter_info_t initializer
 * that selects the matching generated fetcher function.
 *
 * NOTE(review): the closing "}," continuation line of each entry was
 * missing from the extracted text and has been restored.
 */
#define SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat)	\
    { PIXMAN_ ## format,						\
      GENERAL_SEPARABLE_CONVOLUTION_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \
      ITER_NARROW | ITER_SRC,						\
      NULL, bits_image_fetch_separable_convolution_affine_ ## name, NULL \
    },

#define BILINEAR_AFFINE_FAST_PATH(name, format, repeat)			\
    { PIXMAN_ ## format,						\
      GENERAL_BILINEAR_FLAGS | FAST_PATH_ ## repeat ## _REPEAT,		\
      ITER_NARROW | ITER_SRC,						\
      NULL, bits_image_fetch_bilinear_affine_ ## name, NULL,		\
    },

#define NEAREST_AFFINE_FAST_PATH(name, format, repeat)			\
    { PIXMAN_ ## format,						\
      GENERAL_NEAREST_FLAGS | FAST_PATH_ ## repeat ## _REPEAT,		\
      ITER_NARROW | ITER_SRC,						\
      NULL, bits_image_fetch_nearest_affine_ ## name, NULL		\
    },

/* Emit all three filter variants for one (name, format, repeat). */
#define AFFINE_FAST_PATHS(name, format, repeat)				\
    SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat)	\
    BILINEAR_AFFINE_FAST_PATH(name, format, repeat)			\
    NEAREST_AFFINE_FAST_PATH(name, format, repeat)
3265 AFFINE_FAST_PATHS (pad_a8r8g8b8
, a8r8g8b8
, PAD
)
3266 AFFINE_FAST_PATHS (none_a8r8g8b8
, a8r8g8b8
, NONE
)
3267 AFFINE_FAST_PATHS (reflect_a8r8g8b8
, a8r8g8b8
, REFLECT
)
3268 AFFINE_FAST_PATHS (normal_a8r8g8b8
, a8r8g8b8
, NORMAL
)
3269 AFFINE_FAST_PATHS (pad_x8r8g8b8
, x8r8g8b8
, PAD
)
3270 AFFINE_FAST_PATHS (none_x8r8g8b8
, x8r8g8b8
, NONE
)
3271 AFFINE_FAST_PATHS (reflect_x8r8g8b8
, x8r8g8b8
, REFLECT
)
3272 AFFINE_FAST_PATHS (normal_x8r8g8b8
, x8r8g8b8
, NORMAL
)
3273 AFFINE_FAST_PATHS (pad_a8
, a8
, PAD
)
3274 AFFINE_FAST_PATHS (none_a8
, a8
, NONE
)
3275 AFFINE_FAST_PATHS (reflect_a8
, a8
, REFLECT
)
3276 AFFINE_FAST_PATHS (normal_a8
, a8
, NORMAL
)
3277 AFFINE_FAST_PATHS (pad_r5g6b5
, r5g6b5
, PAD
)
3278 AFFINE_FAST_PATHS (none_r5g6b5
, r5g6b5
, NONE
)
3279 AFFINE_FAST_PATHS (reflect_r5g6b5
, r5g6b5
, REFLECT
)
3280 AFFINE_FAST_PATHS (normal_r5g6b5
, r5g6b5
, NORMAL
)
3285 pixman_implementation_t
*
3286 _pixman_implementation_create_fast_path (pixman_implementation_t
*fallback
)
3288 pixman_implementation_t
*imp
= _pixman_implementation_create (fallback
, c_fast_paths
);
3290 imp
->fill
= fast_path_fill
;
3291 imp
->iter_info
= fast_iters
;