1 // Copyright 2002, 2003, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * image.h: Abstract base class for the internal representations of images used
31 #include "exposure/exposure.h"
33 #define IMAGE_BAYER_NONE 0
36 * This constant indicates that some other default value should be filled in.
39 #define IMAGE_BAYER_DEFAULT 0x8
42 * Do not change these values without inspecting
43 * image_bayer_ale_real::r_*_offset().
45 #define IMAGE_BAYER_RGBG 0x4 /* binary 100 */
46 #define IMAGE_BAYER_GBGR 0x5 /* binary 101 */
47 #define IMAGE_BAYER_GRGB 0x6 /* binary 110 */
48 #define IMAGE_BAYER_BGRG 0x7 /* binary 111 */
50 #define ALE_GLSL_IMAGE_INCLUDE \
54 "vec3 image_get_pixel(image _this, vec4 pos);\n"
56 class image
: protected exposure::listener
{
58 static double resident
;
59 unsigned int _dimx
, _dimy
, _depth
;
62 mutable exposure
*_exp
;
65 image (const image
&source
) {
67 assert (source
._depth
== 3);
72 _offset
= source
._offset
;
78 _exp
->add_listener(this, name
);
82 static void set_resident(double r
) {
86 static double get_resident() {
90 image (unsigned int dimy
, unsigned int dimx
, unsigned int depth
,
91 const char *name
= "anonymous", exposure
*_exp
= NULL
,
92 unsigned int bayer
= IMAGE_BAYER_NONE
) {
99 _offset
= point(0, 0);
105 _exp
->add_listener(this, name
);
108 unsigned int get_bayer() const {
112 virtual char get_channels(int i
, int j
) const {
116 virtual unsigned int bayer_color(unsigned int i
, unsigned int j
) const {
120 double storage_size() const {
121 if (bayer
!= IMAGE_BAYER_NONE
)
122 return _dimx
* _dimy
* sizeof(ale_real
);
124 return 3 * _dimx
* _dimy
* sizeof(ale_real
);
127 exposure
&exp() const {
131 point
offset() const {
135 void set_offset(int i
, int j
) {
140 void set_offset(point p
) {
144 unsigned int width() const {
148 unsigned int height() const {
152 unsigned int depth() const {
156 virtual void set_pixel(unsigned int y
, unsigned int x
, spixel p
) = 0;
158 virtual spixel
get_pixel(unsigned int y
, unsigned int x
) const = 0;
160 virtual spixel
get_raw_pixel(unsigned int y
, unsigned int x
) const {
161 return ((const image
*)this)->get_pixel(y
, x
);
164 virtual void add_pixel(unsigned int y
, unsigned int x
, pixel p
) {
168 virtual void mul_pixel(unsigned int y
, unsigned int x
, pixel p
) {
172 virtual void div_pixel(unsigned int y
, unsigned int x
, pixel p
) {
176 virtual void add_chan(unsigned int y
, unsigned int x
, unsigned int k
, ale_real c
) {
180 virtual void div_chan(unsigned int y
, unsigned int x
, unsigned int k
, ale_real c
) {
184 virtual void set_chan(unsigned int y
, unsigned int x
, unsigned int k
, ale_sreal c
) = 0;
186 virtual ale_sreal
get_chan(unsigned int y
, unsigned int x
, unsigned int k
) const = 0;
188 ale_real
maxval() const {
189 ale_real result
= get_pixel(0, 0)[0];
191 for (unsigned int i
= 0; i
< _dimy
; i
++)
192 for (unsigned int j
= 0; j
< _dimx
; j
++) {
193 pixel p
= get_pixel(i
, j
);
195 for (unsigned int k
= 0; k
< _depth
; k
++)
196 if (p
[k
] > result
|| !finite(result
))
203 ale_real
minval() const {
204 ale_real result
= get_pixel(0, 0)[0];
206 for (unsigned int i
= 0; i
< _dimy
; i
++)
207 for (unsigned int j
= 0; j
< _dimx
; j
++) {
208 pixel p
= get_pixel(i
, j
);
210 for (unsigned int k
= 0; k
< _depth
; k
++)
211 if (p
[k
] < result
|| !finite(result
))
219 * Get the maximum difference among adjacent pixels.
222 pixel
get_max_diff(unsigned int i
, unsigned int j
) const {
223 assert(i
<= _dimy
- 1);
224 assert(j
<= _dimx
- 1);
226 pixel max
= get_pixel(i
, j
), min
= get_pixel(i
, j
);
228 for (int ii
= -1; ii
<= 1; ii
++)
229 for (int jj
= -1; jj
<= 1; jj
++) {
237 if ((unsigned int) iii
> _dimy
- 1)
239 if ((unsigned int) jjj
> _dimx
- 1)
242 pixel p
= get_pixel(iii
, jjj
);
244 for (int d
= 0; d
< 3; d
++) {
255 pixel
get_max_diff(point x
) const {
258 assert (x
[0] <= _dimy
- 1);
259 assert (x
[1] <= _dimx
- 1);
261 unsigned int i
= (unsigned int) round(x
[0]);
262 unsigned int j
= (unsigned int) round(x
[1]);
264 return get_max_diff(i
, j
);
267 int in_bounds(point x
) const {
270 || x
[0] > height() - 1
271 || x
[1] > width() - 1)
279 * Get a color value at a given position using bilinear interpolation between the
280 * four nearest pixels.
282 pixel
get_bl(point x
, int defined
= 0) const {
283 // fprintf(stderr, "get_bl x=%f %f\n", (double) x[0], (double) x[1]);
289 assert (x
[0] <= _dimy
- 1);
290 assert (x
[1] <= _dimx
- 1);
292 int lx
= (int) floor(x
[1]);
293 int hx
= (int) floor(x
[1]) + 1;
294 int ly
= (int) floor(x
[0]);
295 int hy
= (int) floor(x
[0]) + 1;
297 // fprintf(stderr, "get_bl l=%d %d h=%d %d\n", ly, lx, hy, hx);
302 neighbor
[0] = get_pixel(ly
, lx
);
303 neighbor
[1] = get_pixel(hy
% _dimy
, lx
);
304 neighbor
[2] = get_pixel(hy
% _dimy
, hx
% _dimx
);
305 neighbor
[3] = get_pixel(ly
, hx
% _dimx
);
307 // for (int d = 0; d < 4; d++)
308 // fprintf(stderr, "neighbor_%d=%f %f %f\n", d,
309 // (double) neighbor[d][0],
310 // (double) neighbor[d][1],
311 // (double) neighbor[d][2]);
313 factor
[0] = (ale_real
) (hx
- x
[1]) * (ale_real
) (hy
- x
[0]);
314 factor
[1] = (ale_real
) (hx
- x
[1]) * (ale_real
) (x
[0] - ly
);
315 factor
[2] = (ale_real
) (x
[1] - lx
) * (ale_real
) (x
[0] - ly
);
316 factor
[3] = (ale_real
) (x
[1] - lx
) * (ale_real
) (hy
- x
[0]);
318 // for (int d = 0; d < 4; d++)
319 // fprintf(stderr, "factor_%d=%f\n", d,
320 // (double) factor[d]);
323 * Use bilinear and/or geometric interpolation
327 result
= pixel(0, 0, 0);
329 for (int n
= 0; n
< 4; n
++)
330 result
+= factor
[n
] * neighbor
[n
];
334 * Calculating the geometric mean may be expensive on
335 * some platforms (e.g., those without floating-point
339 result
= pixel(1, 1, 1);
341 for (int n
= 0; n
< 4; n
++)
342 result
*= ppow(neighbor
[n
], factor
[n
]);
345 * Taking the minimum value may be cheaper than
346 * calculating a geometric mean.
349 result
= neighbor
[0];
351 for (int n
= 1; n
< 4; n
++)
352 for (int k
= 0; k
< 3; k
++)
353 if (neighbor
[n
][k
] < result
[k
])
354 result
[k
] = neighbor
[n
][k
];
358 // fprintf(stderr, "result=%f %f %f\n",
359 // (double) result[0],
360 // (double) result[1],
361 // (double) result[2]);
366 pixel
get_scaled_bl(point x
, ale_pos f
, int defined
= 0) const {
368 x
[0]/f
<= height() - 1
370 : (ale_pos
) (height() - 1),
371 x
[1]/f
<= width() - 1
373 : (ale_pos
) (width() - 1));
375 return get_bl(scaled
, defined
);
380 * Make a new image suitable for receiving scaled values.
382 virtual image
*scale_generator(int height
, int width
, int depth
, const char *name
) const = 0;
385 * Generate an image of medians within a given radius
388 image
*medians(int radius
) const {
390 assert (radius
>= 0);
392 image
*is
= scale_generator(height(), width(), depth(), "median");
395 for (unsigned int i
= 0; i
< height(); i
++)
396 for (unsigned int j
= 0; j
< width(); j
++) {
398 std::vector
<ale_real
> p
[3];
400 for (int ii
= -radius
; ii
<= radius
; ii
++)
401 for (int jj
= -radius
; jj
<= radius
; jj
++) {
405 if (in_bounds(point(iii
, jjj
)))
406 for (int k
= 0; k
< 3; k
++)
407 if (finite(get_pixel(iii
, jjj
)[k
]))
408 p
[k
].push_back(get_pixel(iii
, jjj
)[k
]);
411 is
->set_pixel(i
, j
, d2::pixel::undefined());
413 for (int k
= 0; k
< 3; k
++) {
414 std::sort(p
[k
].begin(), p
[k
].end());
416 unsigned int pkc
= p
[k
].size();
422 is
->set_chan(i
, j
, k
,
423 (p
[k
][pkc
/ 2] + p
[k
][pkc
/ 2 - 1]) / 2);
425 is
->set_chan(i
, j
, k
,
434 * Generate an image of differences of the first channel. The first
435 * coordinate differences are stored in the first channel, second in the
439 image
*fcdiffs() const {
440 image
*is
= scale_generator(height(), width(), depth(), "diff");
444 for (unsigned int i
= 0; i
< height(); i
++)
445 for (unsigned int j
= 0; j
< width(); j
++) {
449 && !finite(get_chan(i
, j
, 0))) {
451 is
->set_chan(i
, j
, 0, (get_chan(i
+ 1, j
, 0)
452 - get_chan(i
- 1, j
, 0)) / 2);
454 } else if (i
+ 1 < height()
456 && finite(get_chan(i
+ 1, j
, 0))
457 && finite(get_chan(i
- 1, j
, 0))) {
459 is
->set_chan(i
, j
, 0, ((get_chan(i
, j
, 0) - get_chan(i
- 1, j
, 0))
460 + (get_chan(i
+ 1, j
, 0) - get_chan(i
, j
, 0))) / 2);
462 } else if (i
+ 1 < height()
463 && finite(get_chan(i
+ 1, j
, 0))) {
465 is
->set_chan(i
, j
, 0, get_chan(i
+ 1, j
, 0) - get_chan(i
, j
, 0));
468 && finite(get_chan(i
- 1, j
, 0))) {
470 is
->set_chan(i
, j
, 0, get_chan(i
, j
, 0) - get_chan(i
- 1, j
, 0));
473 is
->set_chan(i
, j
, 0, 0);
478 && !finite(get_chan(i
, j
, 0))) {
480 is
->set_chan(i
, j
, 1, (get_chan(i
, j
+ 1, 0) - get_chan(i
, j
- 1, 0)) / 2);
482 } else if (j
+ 1 < width()
484 && finite(get_chan(i
, j
+ 1, 0))
485 && finite(get_chan(i
, j
- 1, 0))) {
487 is
->set_chan(i
, j
, 1, ((get_chan(i
, j
, 0) - get_chan(i
, j
- 1, 0))
488 + (get_chan(i
, j
+ 1, 0) - get_chan(i
, j
, 0))) / 2);
490 } else if (j
+ 1 < width() && finite(get_chan(i
, j
+ 1, 0))) {
492 is
->set_chan(i
, j
, 1, get_chan(i
, j
+ 1, 0) - get_chan(i
, j
, 0));
494 } else if (j
> 0 && finite(get_chan(i
, j
- 1, 0))) {
496 is
->set_chan(i
, j
, 1, get_chan(i
, j
, 0) - get_chan(i
, j
- 1, 0));
499 is
->set_chan(i
, j
, 1, 0);
507 * Generate an image of median (within a given radius) difference of the
511 image
*fcdiff_median(int radius
) const {
512 image
*diff
= fcdiffs();
516 image
*median
= diff
->medians(radius
);
526 * Scale by half. We use the following filter:
532 * At the edges, these values are normalized so that the sum of the
533 * weights of contributing pixels is 1.
535 class scale_by_half_threaded
: public thread::decompose_domain
{
539 void subdomain_algorithm(unsigned int thread
,
540 int i_min
, int i_max
, int j_min
, int j_max
) {
542 ale_real _0625
= (ale_real
) 0.0625;
543 ale_real _125
= (ale_real
) 0.125;
544 ale_real _25
= (ale_real
) 0.25;
545 ale_real _0
= (ale_real
) 0;
547 unsigned int ui_min
= (unsigned int) i_min
;
548 unsigned int ui_max
= (unsigned int) i_max
;
549 unsigned int uj_min
= (unsigned int) j_min
;
550 unsigned int uj_max
= (unsigned int) j_max
;
552 for (unsigned int i
= ui_min
; i
< ui_max
; i
++)
553 for (unsigned int j
= uj_min
; j
< uj_max
; j
++) {
556 ( ( ((i
> 0 && j
> 0)
557 ? iu
->get_pixel(2 * i
- 1, 2 * j
- 1) * _0625
560 ? iu
->get_pixel(2 * i
- 1, 2 * j
) * _125
562 + ((i
> 0 && j
< is
->width() - 1)
563 ? iu
->get_pixel(2 * i
- 1, 2 * j
+ 1) * _0625
566 ? iu
->get_pixel(2 * i
, 2 * j
- 1) * _125
568 + iu
->get_pixel(2 * i
, 2 * j
) * _25
569 + ((j
< is
->width() - 1)
570 ? iu
->get_pixel(2 * i
, 2 * j
+ 1) * _125
572 + ((i
< is
->height() - 1 && j
> 0)
573 ? iu
->get_pixel(2 * i
+ 1, 2 * j
- 1) * _0625
575 + ((i
< is
->height() - 1)
576 ? iu
->get_pixel(2 * i
+ 1, 2 * j
) * _125
/* NOTE(review): every other boundary guard in this filter tests
 * i < is->height() - 1, but this bottom-right corner tap tests
 * i < is->height() (no "- 1").  The matching normalization term
 * (orig line 606) shares the same form, so the two are mutually
 * consistent, but the asymmetry with the sibling guards looks
 * unintentional — confirm against the other corner taps. */
578 + ((i
< is
->height() && j
< is
->width() - 1)
579 ? iu
->get_pixel(2 * i
+ 1, 2 * j
+ 1) * _0625
590 + ((i
> 0 && j
< is
->width() - 1)
597 + ((j
< is
->width() - 1)
600 + ((i
< is
->height() - 1 && j
> 0)
603 + ((i
< is
->height() - 1)
606 + ((i
< is
->height() && j
< is
->width() - 1)
613 scale_by_half_threaded(image
*_is
, const image
*_iu
)
614 : decompose_domain(0, _is
->height(),
621 image
*scale_by_half(const char *name
) const {
624 image
*is
= scale_generator(
625 (int) floor(height() * (double) f
),
626 (int) floor(width() * (double) f
), depth(), name
);
630 scale_by_half_threaded
sbht(is
, this);
633 is
->_offset
= point(_offset
[0] * f
, _offset
[1] * f
);
639 * Scale by half. This function uses externally-provided weights,
640 * multiplied by the following filter:
646 * Values are normalized so that the sum of the weights of contributing
649 image
*scale_by_half(const image
*weights
, const char *name
) const {
652 return scale_by_half(name
);
656 image
*is
= scale_generator(
657 (int) floor(height() * (double) f
),
658 (int) floor(width() * (double) f
), depth(), name
);
662 for (unsigned int i
= 0; i
< is
->height(); i
++)
663 for (unsigned int j
= 0; j
< is
->width(); j
++) {
667 ( ( ((i
> 0 && j
> 0)
668 ? (pixel
) get_pixel(2 * i
- 1, 2 * j
- 1)
669 * (pixel
) weights
->get_pixel(2 * i
- 1, 2 * j
- 1)
673 ? (pixel
) get_pixel(2 * i
- 1, 2 * j
)
674 * (pixel
) weights
->get_pixel(2 * i
- 1, 2 * j
)
677 + ((i
> 0 && j
< is
->width() - 1)
678 ? (pixel
) get_pixel(2 * i
- 1, 2 * j
+ 1)
679 * (pixel
) weights
->get_pixel(2 * i
- 1, 2 * j
+ 1)
683 ? (pixel
) get_pixel(2 * i
, 2 * j
- 1)
684 * (pixel
) weights
->get_pixel(2 * i
, 2 * j
- 1)
687 + get_pixel(2 * i
, 2 * j
)
688 * (pixel
) weights
->get_pixel(2 * i
, 2 * j
)
690 + ((j
< is
->width() - 1)
691 ? (pixel
) get_pixel(2 * i
, 2 * j
+ 1)
692 * (pixel
) weights
->get_pixel(2 * i
, 2 * j
+ 1)
695 + ((i
< is
->height() - 1 && j
> 0)
696 ? (pixel
) get_pixel(2 * i
+ 1, 2 * j
- 1)
697 * (pixel
) weights
->get_pixel(2 * i
+ 1, 2 * j
- 1)
700 + ((i
< is
->height() - 1)
701 ? (pixel
) get_pixel(2 * i
+ 1, 2 * j
)
702 * (pixel
) weights
->get_pixel(2 * i
+ 1, 2 * j
)
705 + ((i
< is
->height() && j
< is
->width() - 1)
706 ? (pixel
) get_pixel(2 * i
+ 1, 2 * j
+ 1)
707 * (pixel
) weights
->get_pixel(2 * i
+ 1, 2 * j
+ 1)
714 ? weights
->get_pixel(2 * i
- 1, 2 * j
- 1)
718 ? weights
->get_pixel(2 * i
- 1, 2 * j
)
721 + ((i
> 0 && j
< is
->width() - 1)
722 ? weights
->get_pixel(2 * i
- 1, 2 * j
+ 1)
726 ? weights
->get_pixel(2 * i
, 2 * j
- 1)
729 + weights
->get_pixel(2 * i
, 2 * j
)
731 + ((j
< is
->width() - 1)
732 ? weights
->get_pixel(2 * i
, 2 * j
+ 1)
735 + ((i
< is
->height() - 1 && j
> 0)
736 ? weights
->get_pixel(2 * i
+ 1, 2 * j
- 1)
739 + ((i
< is
->height() - 1)
740 ? weights
->get_pixel(2 * i
+ 1, 2 * j
)
743 + ((i
< is
->height() && j
< is
->width() - 1)
744 ? weights
->get_pixel(2 * i
+ 1, 2 * j
+ 1)
746 : pixel(0, 0, 0)) ) );
748 for (int k
= 0; k
< 3; k
++)
749 if (!finite(value
[k
]))
752 is
->set_pixel(i
, j
, value
);
755 is
->_offset
= point(_offset
[0] * f
, _offset
[1] * f
);
761 * Scale an image definition array by 1/2.
763 * ALE considers an image definition array as a special kind of image
764 * weight array (typedefs of which should appear below the definition
765 * of this class). ALE uses nonzero pixel values to mean 'defined' and
766 * zero values to mean 'undefined'. Through this interpretation, the
767 * image weight array implementation that ALE uses allows image weight
768 * arrays to also serve as image definition arrays.
770 * Whereas scaling of image weight arrays is not generally obvious in
771 * either purpose or method, ALE requires that image definition arrays
772 * be scalable. (Note that in the special case where weight is treated
773 * as certainty, using a geometric mean is probably correct.)
775 * We currently use a geometric mean to implement scaling of
779 class defined_scale_by_half_threaded
: public thread::decompose_domain
{
783 void subdomain_algorithm(unsigned int thread
,
784 int i_min
, int i_max
, int j_min
, int j_max
) {
787 ale_real _0625
= (ale_real
) 0.0625;
788 ale_real _125
= (ale_real
) 0.125;
789 ale_real _25
= (ale_real
) 0.25;
792 int ui_min
= (int) i_min
;
793 int ui_max
= (int) i_max
;
794 int uj_min
= (int) j_min
;
795 int uj_max
= (int) j_max
;
797 for (int i
= ui_min
; i
< ui_max
; i
++)
798 for (int j
= uj_min
; j
< uj_max
; j
++) {
803 * Calculate a geometric mean; this approach
804 * may be expensive on some platforms (e.g.,
805 * those without floating-point support in
811 ( ( ((i
> 0 && j
> 0)
812 ? ppow(iu
->get_pixel(2 * i
- 1, 2 * j
- 1), _0625
)
815 ? ppow(iu
->get_pixel(2 * i
- 1, 2 * j
), _125
)
817 * ((i
> 0 && j
< is
->width() - 1)
818 ? ppow(iu
->get_pixel(2 * i
- 1, 2 * j
+ 1), _0625
)
821 ? ppow(iu
->get_pixel(2 * i
, 2 * j
- 1), _125
)
823 * ppow(iu
->get_pixel(2 * i
, 2 * j
), _25
)
824 * ((j
< is
->width() - 1)
825 ? ppow(iu
->get_pixel(2 * i
, 2 * j
+ 1), _125
)
827 * ((i
< is
->height() - 1 && j
> 0)
828 ? ppow(iu
->get_pixel(2 * i
+ 1, 2 * j
- 1), _0625
)
830 * ((i
< is
->height() - 1)
831 ? ppow(iu
->get_pixel(2 * i
+ 1, 2 * j
), _125
)
833 * ((i
< is
->height() && j
< is
->width() - 1)
834 ? ppow(iu
->get_pixel(2 * i
+ 1, 2 * j
+ 1), _0625
)
/* Fallback path: take the per-channel minimum over the 3x3 source
 * neighborhood centered at (2i, 2j), which is cheaper than the
 * geometric mean computed on the other path. */
838 pixel value
= iu
->get_pixel(2 * i
, 2 * j
);
840 for (int ii
= 2 * i
- 1; ii
<= 2 * i
+ 1; ii
++)
841 for (int jj
= 2 * j
- 1; jj
<= 2 * j
+ 1; jj
++) {
844 || ii
> (int) iu
->height() - 1
/* NOTE(review): BUG — the next test clamps jj (a COLUMN index)
 * against iu->height(); it should almost certainly be
 * iu->width().  For non-square images this either reads out of
 * bounds (width < height) or wrongly skips valid columns
 * (width > height).  Confirm and change height() to width(). */
845 || jj
> (int) iu
->height() - 1)
848 pixel value2
= iu
->get_pixel(ii
, jj
);
850 for (int k
= 0; k
< 3; k
++)
851 if (value2
[k
] < value
[k
]
852 || !finite(value2
[k
])) /* propagate non-finites */
853 value
[k
] = value2
[k
];
859 for (int k
= 0; k
< 3; k
++)
860 if (!finite(value
[k
]))
863 is
->set_pixel(i
, j
, value
);
868 defined_scale_by_half_threaded(image
*_is
, const image
*_iu
)
869 : decompose_domain(0, _is
->height(),
876 image
*defined_scale_by_half(const char *name
) const {
879 image
*is
= scale_generator(
880 (int) floor(height() * (double) f
),
881 (int) floor(width() * (double) f
), depth(), name
);
885 defined_scale_by_half_threaded
dsbht(is
, this);
888 is
->_offset
= point(_offset
[0] * f
, _offset
[1] * f
);
894 * Return an image scaled by some factor != 1.0, using bilinear
897 image
*scale(ale_pos f
, const char *name
, int defined
= 0) const {
900 * We probably don't want to scale images by a factor of 1.0,
901 * or by non-positive values.
903 assert (f
!= 1.0 && f
> 0);
906 image
*is
= scale_generator(
907 (int) floor(height() * (double) f
),
908 (int) floor(width() * (double) f
), depth(), name
);
912 unsigned int i
, j
, k
;
914 for (i
= 0; i
< is
->height(); i
++)
915 for (j
= 0; j
< is
->width(); j
++)
916 for (k
= 0; k
< is
->depth(); k
++)
918 get_scaled_bl(point(i
, j
), f
, defined
));
920 is
->_offset
= point(_offset
[0] * f
, _offset
[1] * f
);
923 } else if (f
== 0.5) {
925 return scale_by_half(name
);
927 return defined_scale_by_half(name
);
929 image
*is
= scale(2*f
, name
, defined
);
930 image
*result
= is
->scale(0.5, name
, defined
);
938 * Extend the image area to the top, bottom, left, and right,
939 * initializing the new image areas with black pixels. Negative values
942 virtual image
*_extend(int top
, int bottom
, int left
, int right
) = 0;
944 static void extend(image
**i
, int top
, int bottom
, int left
, int right
) {
945 image
*is
= (*i
)->_extend(top
, bottom
, left
, right
);
956 image
*clone(const char *name
) const {
957 image
*ic
= scale_generator(
958 height(), width(), depth(), name
);
962 for (unsigned int i
= 0; i
< height(); i
++)
963 for (unsigned int j
= 0; j
< width(); j
++)
968 ic
->_offset
= _offset
;
974 * Acceleration domain sequence point.
977 virtual void accel_domain_sequence() {
981 * Acceleration type. 0 indicates that the type's pixels are directly
982 * accessible; 1 indicates pixels are of image_accel type.
985 virtual int accel_type() {
990 * Unaccelerated equivalent of an image. Unaccelerated images return
994 virtual image
*unaccel_equiv() const {