gfx/2d/FilterProcessingScalar.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#define FILTER_PROCESSING_SCALAR

#include "FilterProcessingSIMD-inl.h"
#include "Logging.h"

namespace mozilla {
namespace gfx {
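
// Scalar (non-SIMD) code paths for FilterProcessing. Most of the heavy
// lifting lives in the shared templates of FilterProcessingSIMD-inl.h; here
// they are instantiated with the simd::Scalar* emulation types so the same
// algorithms run one pixel at a time, without requiring any vector
// instruction set.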

void FilterProcessing::ExtractAlpha_Scalar(const IntSize& size,
                                           uint8_t* sourceData,
                                           int32_t sourceStride,
                                           uint8_t* alphaData,
                                           int32_t alphaStride) {
  for (int32_t y = 0; y < size.height; y++) {
    for (int32_t x = 0; x < size.width; x++) {
      int32_t sourceIndex = y * sourceStride + 4 * x;
      int32_t targetIndex = y * alphaStride + x;
      alphaData[targetIndex] =
          sourceData[sourceIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A];
    }
  }
}

already_AddRefed<DataSourceSurface> FilterProcessing::ConvertToB8G8R8A8_Scalar(
    SourceSurface* aSurface) {
  return ConvertToB8G8R8A8_SIMD<simd::Scalaru8x16_t>(aSurface);
}
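
// The morphology passes below take, for every output pixel, the minimum
// (erode) or maximum (dilate) of each of the four BGRA components over a
// window of 2 * aRadius + 1 samples. The box-shaped kernel is separable, so
// there is one pass along the horizontal axis here and one along the
// vertical axis further down.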
template <MorphologyOperator Operator>
static void ApplyMorphologyHorizontal_Scalar(
    uint8_t* aSourceData, int32_t aSourceStride, uint8_t* aDestData,
    int32_t aDestStride, const IntRect& aDestRect, int32_t aRadius) {
  static_assert(Operator == MORPHOLOGY_OPERATOR_ERODE ||
                    Operator == MORPHOLOGY_OPERATOR_DILATE,
                "unexpected morphology operator");

  for (int32_t y = aDestRect.Y(); y < aDestRect.YMost(); y++) {
    int32_t startX = aDestRect.X() - aRadius;
    int32_t endX = aDestRect.X() + aRadius;
    for (int32_t x = aDestRect.X(); x < aDestRect.XMost();
         x++, startX++, endX++) {
      int32_t sourceIndex = y * aSourceStride + 4 * startX;
      uint8_t u[4];
      for (size_t i = 0; i < 4; i++) {
        u[i] = aSourceData[sourceIndex + i];
      }

      sourceIndex += 4;
      for (int32_t ix = startX + 1; ix <= endX; ix++, sourceIndex += 4) {
        for (size_t i = 0; i < 4; i++) {
          if (Operator == MORPHOLOGY_OPERATOR_ERODE) {
            u[i] = umin(u[i], aSourceData[sourceIndex + i]);
          } else {
            u[i] = umax(u[i], aSourceData[sourceIndex + i]);
          }
        }
      }

      int32_t destIndex = y * aDestStride + 4 * x;
      for (size_t i = 0; i < 4; i++) {
        aDestData[destIndex + i] = u[i];
      }
    }
  }
}

void FilterProcessing::ApplyMorphologyHorizontal_Scalar(
    uint8_t* aSourceData, int32_t aSourceStride, uint8_t* aDestData,
    int32_t aDestStride, const IntRect& aDestRect, int32_t aRadius,
    MorphologyOperator aOp) {
  if (aOp == MORPHOLOGY_OPERATOR_ERODE) {
    gfx::ApplyMorphologyHorizontal_Scalar<MORPHOLOGY_OPERATOR_ERODE>(
        aSourceData, aSourceStride, aDestData, aDestStride, aDestRect, aRadius);
  } else {
    gfx::ApplyMorphologyHorizontal_Scalar<MORPHOLOGY_OPERATOR_DILATE>(
        aSourceData, aSourceStride, aDestData, aDestStride, aDestRect, aRadius);
  }
}
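
// Vertical counterpart of the horizontal pass above: identical min/max
// accumulation, but the sliding window steps through whole rows
// (stride-sized increments) instead of adjacent pixels.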
template <MorphologyOperator Operator>
static void ApplyMorphologyVertical_Scalar(
    uint8_t* aSourceData, int32_t aSourceStride, uint8_t* aDestData,
    int32_t aDestStride, const IntRect& aDestRect, int32_t aRadius) {
  static_assert(Operator == MORPHOLOGY_OPERATOR_ERODE ||
                    Operator == MORPHOLOGY_OPERATOR_DILATE,
                "unexpected morphology operator");

  int32_t startY = aDestRect.Y() - aRadius;
  int32_t endY = aDestRect.Y() + aRadius;
  for (int32_t y = aDestRect.Y(); y < aDestRect.YMost();
       y++, startY++, endY++) {
    for (int32_t x = aDestRect.X(); x < aDestRect.XMost(); x++) {
      int32_t sourceIndex = startY * aSourceStride + 4 * x;
      uint8_t u[4];
      for (size_t i = 0; i < 4; i++) {
        u[i] = aSourceData[sourceIndex + i];
      }

      sourceIndex += aSourceStride;
      for (int32_t iy = startY + 1; iy <= endY;
           iy++, sourceIndex += aSourceStride) {
        for (size_t i = 0; i < 4; i++) {
          if (Operator == MORPHOLOGY_OPERATOR_ERODE) {
            u[i] = umin(u[i], aSourceData[sourceIndex + i]);
          } else {
            u[i] = umax(u[i], aSourceData[sourceIndex + i]);
          }
        }
      }

      int32_t destIndex = y * aDestStride + 4 * x;
      for (size_t i = 0; i < 4; i++) {
        aDestData[destIndex + i] = u[i];
      }
    }
  }
}

void FilterProcessing::ApplyMorphologyVertical_Scalar(
    uint8_t* aSourceData, int32_t aSourceStride, uint8_t* aDestData,
    int32_t aDestStride, const IntRect& aDestRect, int32_t aRadius,
    MorphologyOperator aOp) {
  if (aOp == MORPHOLOGY_OPERATOR_ERODE) {
    gfx::ApplyMorphologyVertical_Scalar<MORPHOLOGY_OPERATOR_ERODE>(
        aSourceData, aSourceStride, aDestData, aDestStride, aDestRect, aRadius);
  } else {
    gfx::ApplyMorphologyVertical_Scalar<MORPHOLOGY_OPERATOR_DILATE>(
        aSourceData, aSourceStride, aDestData, aDestStride, aDestRect, aRadius);
  }
}

already_AddRefed<DataSourceSurface> FilterProcessing::ApplyColorMatrix_Scalar(
    DataSourceSurface* aInput, const Matrix5x4& aMatrix) {
  return ApplyColorMatrix_SIMD<simd::Scalari32x4_t, simd::Scalari16x8_t,
                               simd::Scalaru8x16_t>(aInput, aMatrix);
}

void FilterProcessing::ApplyComposition_Scalar(DataSourceSurface* aSource,
                                               DataSourceSurface* aDest,
                                               CompositeOperator aOperator) {
  return ApplyComposition_SIMD<simd::Scalari32x4_t, simd::Scalaru16x8_t,
                               simd::Scalaru8x16_t>(aSource, aDest, aOperator);
}

void FilterProcessing::SeparateColorChannels_Scalar(
    const IntSize& size, uint8_t* sourceData, int32_t sourceStride,
    uint8_t* channel0Data, uint8_t* channel1Data, uint8_t* channel2Data,
    uint8_t* channel3Data, int32_t channelStride) {
  for (int32_t y = 0; y < size.height; y++) {
    for (int32_t x = 0; x < size.width; x++) {
      int32_t sourceIndex = y * sourceStride + 4 * x;
      int32_t targetIndex = y * channelStride + x;
      channel0Data[targetIndex] = sourceData[sourceIndex];
      channel1Data[targetIndex] = sourceData[sourceIndex + 1];
      channel2Data[targetIndex] = sourceData[sourceIndex + 2];
      channel3Data[targetIndex] = sourceData[sourceIndex + 3];
    }
  }
}
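
// Inverse of SeparateColorChannels_Scalar: re-interleaves four planar
// channel buffers into a single 4-bytes-per-pixel surface.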
void FilterProcessing::CombineColorChannels_Scalar(
    const IntSize& size, int32_t resultStride, uint8_t* resultData,
    int32_t channelStride, uint8_t* channel0Data, uint8_t* channel1Data,
    uint8_t* channel2Data, uint8_t* channel3Data) {
  for (int32_t y = 0; y < size.height; y++) {
    for (int32_t x = 0; x < size.width; x++) {
      int32_t resultIndex = y * resultStride + 4 * x;
      int32_t channelIndex = y * channelStride + x;
      resultData[resultIndex] = channel0Data[channelIndex];
      resultData[resultIndex + 1] = channel1Data[channelIndex];
      resultData[resultIndex + 2] = channel2Data[channelIndex];
      resultData[resultIndex + 3] = channel3Data[channelIndex];
    }
  }
}
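
// Premultiplication: each color component is scaled by alpha / 255, with
// FastDivideBy255 performing the division by 255 without an integer divide.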
void FilterProcessing::DoPremultiplicationCalculation_Scalar(
    const IntSize& aSize, uint8_t* aTargetData, int32_t aTargetStride,
    uint8_t* aSourceData, int32_t aSourceStride) {
  for (int32_t y = 0; y < aSize.height; y++) {
    for (int32_t x = 0; x < aSize.width; x++) {
      int32_t inputIndex = y * aSourceStride + 4 * x;
      int32_t targetIndex = y * aTargetStride + 4 * x;
      uint8_t alpha = aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A];
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] =
          FastDivideBy255<uint8_t>(
              aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] *
              alpha);
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] =
          FastDivideBy255<uint8_t>(
              aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] *
              alpha);
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] =
          FastDivideBy255<uint8_t>(
              aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] *
              alpha);
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A] = alpha;
    }
  }
}
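
// Unpremultiplication: undoes the scaling above. sAlphaFactors (defined with
// the shared SIMD helpers) presumably holds per-alpha fixed-point reciprocals,
// so that (color * sAlphaFactors[alpha] + 128) >> 8 approximates
// color * 255 / alpha without a per-pixel division; the in-code comment's
// 244 * 268 + 128 == 65520 example is consistent with that.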
void FilterProcessing::DoUnpremultiplicationCalculation_Scalar(
    const IntSize& aSize, uint8_t* aTargetData, int32_t aTargetStride,
    uint8_t* aSourceData, int32_t aSourceStride) {
  for (int32_t y = 0; y < aSize.height; y++) {
    for (int32_t x = 0; x < aSize.width; x++) {
      int32_t inputIndex = y * aSourceStride + 4 * x;
      int32_t targetIndex = y * aTargetStride + 4 * x;
      uint8_t alpha = aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A];
      uint16_t alphaFactor = sAlphaFactors[alpha];
      // inputColor * alphaFactor + 128 is guaranteed to fit into uint16_t
      // because the input is premultiplied and thus inputColor <= inputAlpha.
      // The maximum value this can attain is 65520 (which is less than 65535)
      // for color == alpha == 244:
      // 244 * sAlphaFactors[244] + 128 == 244 * 268 + 128 == 65520
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] *
               alphaFactor +
           128) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] *
               alphaFactor +
           128) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] *
               alphaFactor +
           128) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A] = alpha;
    }
  }
}
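
// Opacity: scales all four (premultiplied) BGRA components by aValue. The
// multiply by the 0-255 alpha followed by >> 8 divides by 256 rather than
// 255, a cheap approximation of the exact scaling.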
void FilterProcessing::DoOpacityCalculation_Scalar(
    const IntSize& aSize, uint8_t* aTargetData, int32_t aTargetStride,
    uint8_t* aSourceData, int32_t aSourceStride, Float aValue) {
  uint8_t alpha = uint8_t(roundf(255.f * aValue));
  for (int32_t y = 0; y < aSize.height; y++) {
    for (int32_t x = 0; x < aSize.width; x++) {
      int32_t inputIndex = y * aSourceStride + 4 * x;
      int32_t targetIndex = y * aTargetStride + 4 * x;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_R] * alpha) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_G] * alpha) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_B] * alpha) >>
          8;
      aTargetData[targetIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A] =
          (aSourceData[inputIndex + B8G8R8A8_COMPONENT_BYTEOFFSET_A] * alpha) >>
          8;
    }
  }
}

void FilterProcessing::DoOpacityCalculationA8_Scalar(
    const IntSize& aSize, uint8_t* aTargetData, int32_t aTargetStride,
    uint8_t* aSourceData, int32_t aSourceStride, Float aValue) {
  uint8_t alpha = uint8_t(255.f * aValue);
  for (int32_t y = 0; y < aSize.height; y++) {
    for (int32_t x = 0; x < aSize.width; x++) {
      // A8 surfaces hold one byte per pixel, so index each pixel within the
      // row; without the + x the loop would rewrite only the row's first byte.
      int32_t inputIndex = y * aSourceStride + x;
      int32_t targetIndex = y * aTargetStride + x;
      aTargetData[targetIndex] =
          FastDivideBy255<uint8_t>(aSourceData[inputIndex] * alpha);
    }
  }
}
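
// The remaining entry points have no hand-written scalar loop; they simply
// forward to the shared SIMD templates, instantiated with the scalar
// emulation types.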
already_AddRefed<DataSourceSurface> FilterProcessing::RenderTurbulence_Scalar(
    const IntSize& aSize, const Point& aOffset, const Size& aBaseFrequency,
    int32_t aSeed, int aNumOctaves, TurbulenceType aType, bool aStitch,
    const Rect& aTileRect) {
  return RenderTurbulence_SIMD<simd::Scalarf32x4_t, simd::Scalari32x4_t,
                               simd::Scalaru8x16_t>(
      aSize, aOffset, aBaseFrequency, aSeed, aNumOctaves, aType, aStitch,
      aTileRect);
}

already_AddRefed<DataSourceSurface>
FilterProcessing::ApplyArithmeticCombine_Scalar(DataSourceSurface* aInput1,
                                                DataSourceSurface* aInput2,
                                                Float aK1, Float aK2, Float aK3,
                                                Float aK4) {
  return ApplyArithmeticCombine_SIMD<simd::Scalari32x4_t, simd::Scalari16x8_t,
                                     simd::Scalaru8x16_t>(aInput1, aInput2, aK1,
                                                          aK2, aK3, aK4);
}

}  // namespace gfx
}  // namespace mozilla