bugs: Advantages for incremental library separation by analogy with incremental
[Ale.git] / d2 / render.h
blob6327c6c3b80e7a666417b29060cf0124afe05895
1 // Copyright 2002, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * render.h: A superclass for all rendering classes.
25 #ifndef __render_h__
26 #define __render_h__
28 #include "transformation.h"
29 #include "image.h"
30 #include "point.h"
/*
 * Maximum number of renderers that may be registered in the directory
 * at one time (see render::render() and render::directory below).
 */
#define ACTIVE_RENDERER_COUNT 30

/*
 * GLSL source fragment mirroring this class's static exclusion state on
 * the GPU.  render_is_excluded_r() is the shader-side counterpart of
 * is_excluded_r() below; render_static is populated from rx_count and
 * rx_parameters.  EXCLUSION_ARRAY_SIZE is presumably defined by the
 * shader framework before this fragment is compiled -- confirm against
 * gpu::program usage.
 */
#define ALE_GLSL_RENDER_INCLUDE \
"struct exclusion {\n"\
"	bool is_render;\n"\
"	float x[6];\n"\
"};\n"\
"struct render {\n"\
"	int rx_count;\n"\
"	exclusion rx_parameters[EXCLUSION_ARRAY_SIZE];\n"\
"};\n"\
"uniform render render_static;\n"\
"bool render_is_excluded_r(vec2 offset, vec4 position, int frame);\n"
47 * Class render accepts messages synchronizing rendering steps through the
48 * methods sync(n) and sync(), and returns information about the currently
49 * rendered image via methods get_image() and get_defined(). This class is
50 * abstract, and must be subclassed to be instantiated.
class render : public gpu::program::library {
private:
	/*
	 * Exclusion-region state shared by all renderers; populated by
	 * render_init().
	 */
	static unsigned int rx_count;	// number of entries in rx_parameters
	static exclusion *rx_parameters;	// exclusion region table (heap-allocated copy)
	static int rx_show;		// flag queried via is_rx_show(); presumably
					// "display excluded regions" -- confirm at callers
	static render *directory[ACTIVE_RENDERER_COUNT];	// all registered renderers
	static int directory_length;	// number of directory[] slots in use
	static int extend;		// extension state, queried via is_extend()
	static ale_pos scale_factor;	// global rendering scale factor
	static ale_real wt;		// weight value set via set_wt(); semantics
					// defined by callers of get_wt()

	/*
	 * Per-renderer queue of cloned past results (index 0 is the most
	 * recent previous frame); maintained by sync(n)/extend_queue().
	 */
	image **queue;
	unsigned int queue_size;
	int step_num;		// most recently completed step; -1 before any step
	int entry_number;	// this renderer's index in directory[]
69 static int strpfix(const char *a, const char *b) {
70 return strncmp(a, b, strlen(a));
protected:

	/*
	 * Constructor.  Compiles the shared GLSL exclusion shader and
	 * registers this renderer in the global directory.  Aborts the
	 * program if more than ACTIVE_RENDERER_COUNT renderers are created.
	 */
	render() {
		/*
		 * GPU implementation of is_excluded_r(): same bounds test as
		 * the CPU version below, against render_static uniforms.
		 */
		const char *shader_code =
			ALE_GLSL_RENDER_INCLUDE
			"bool render_is_excluded_r(vec2 offset, vec4 p, int f) {\n"
			"	for (int param = 0; param < render_static.rx_count; param++)\n"
			"		if (render_static.rx_parameters[param].is_render\n"
			"		 && p.x + offset.x >= render_static.rx_parameters[param].x[0]\n"
			"		 && p.x + offset.x <= render_static.rx_parameters[param].x[1]\n"
			"		 && p.y + offset.y >= render_static.rx_parameters[param].x[2]\n"
			"		 && p.y + offset.y <= render_static.rx_parameters[param].x[3]\n"
			"		 && float(f) >= render_static.rx_parameters[param].x[4]\n"
			"		 && float(f) <= render_static.rx_parameters[param].x[5])\n"
			"			return true;\n"
			"	return false;\n"
			"}\n";

		// gpu_shader is presumably inherited from gpu::program::library
		// -- confirm in the gpu headers.
		gpu_shader = new gpu::program::shader(shader_code);

		if (directory_length >= ACTIVE_RENDERER_COUNT) {
			fprintf(stderr, "\n\n*** Too many renderers in d2::render::render() ***\n\n");
			exit(1);
		}

		/* Register this renderer and remember its slot for ~render(). */
		directory[directory_length] = this;
		entry_number = directory_length;
		directory_length++;

		/* No rendering steps performed yet; queue allocated lazily. */
		step_num = -1;
		queue = NULL;
		queue_size = 0;
	}
	/*
	 * Get extension state.
	 */
	int is_extend() {
		return extend;
	}

	/*
	 * Get the scale factor.
	 */
	ale_pos get_scale_factor() {
		return scale_factor;
	}

	/*
	 * Get the current step number (-1 if no step has been performed).
	 */
	int get_step() {
		return step_num;
	}

	/*
	 * Perform the current rendering step.  Subclasses implement the
	 * actual rendering work; invoked by sync(int).
	 */
	virtual void step() = 0;
138 public:
141 * Check for render-coordinate excluded regions. (Applies an offset to
142 * spatial coordinates internally.)
144 static int is_excluded_r(point offset, point p, int f) {
146 for (unsigned int param = 0; param < rx_count; param++)
147 if (rx_parameters[param].type == exclusion::RENDER
148 && p[0] + offset[0] >= rx_parameters[param].x[0]
149 && p[0] + offset[0] <= rx_parameters[param].x[1]
150 && p[1] + offset[1] >= rx_parameters[param].x[2]
151 && p[1] + offset[1] <= rx_parameters[param].x[3]
152 && f >= rx_parameters[param].x[4]
153 && f <= rx_parameters[param].x[5])
154 return 1;
156 return 0;
158 static int is_excluded_r(point offset, int i, int j, int f) {
160 for (unsigned int param = 0; param < rx_count; param++)
161 if (rx_parameters[param].type == exclusion::RENDER
162 && i + offset[0] >= rx_parameters[param].x[0]
163 && i + offset[0] <= rx_parameters[param].x[1]
164 && j + offset[1] >= rx_parameters[param].x[2]
165 && j + offset[1] <= rx_parameters[param].x[3]
166 && f >= rx_parameters[param].x[4]
167 && f <= rx_parameters[param].x[5])
168 return 1;
170 return 0;
172 int is_excluded_r(int i, int j, int f) {
173 return is_excluded_r(get_image()->offset(), i, j, f);
177 * Check for frame-coordinate excluded regions.
179 static int is_excluded_f(point p, int f) {
181 for (unsigned int param = 0; param < rx_count; param++)
182 if (rx_parameters[param].type == exclusion::FRAME
183 && p[0] >= rx_parameters[param].x[0]
184 && p[0] <= rx_parameters[param].x[1]
185 && p[1] >= rx_parameters[param].x[2]
186 && p[1] <= rx_parameters[param].x[3]
187 && f >= rx_parameters[param].x[4]
188 && f <= rx_parameters[param].x[5])
189 return 1;
191 return 0;
193 static int is_excluded_f(int i, int j, int f) {
195 for (unsigned int param = 0; param < rx_count; param++)
196 if (rx_parameters[param].type == exclusion::FRAME
197 && i >= rx_parameters[param].x[0]
198 && i <= rx_parameters[param].x[1]
199 && j >= rx_parameters[param].x[2]
200 && j <= rx_parameters[param].x[3]
201 && f >= rx_parameters[param].x[4]
202 && f <= rx_parameters[param].x[5])
203 return 1;
205 return 0;
208 static int render_count() {
209 return directory_length;
211 static render *render_num(int n) {
212 assert (n < directory_length);
213 return directory[n];
216 static void render_init(unsigned int _rx_count, exclusion *_rx_parameters,
217 int _rx_show, int _extend, ale_pos _scale_factor) {
218 rx_count = _rx_count;
219 rx_show = _rx_show;
220 extend = _extend;
221 scale_factor = _scale_factor;
223 rx_parameters = (exclusion *) malloc(rx_count * sizeof(exclusion));
225 for (unsigned int region = 0; region < rx_count; region++) {
227 rx_parameters[region] = _rx_parameters[region];
230 * Scale spatial rendering coordinates
233 if (rx_parameters[region].type == exclusion::RENDER)
234 for (int p = 0; p < 4; p++)
235 rx_parameters[region].x[p] *= scale_factor;
	/*
	 * Set the weight value wt; semantics are defined by the callers of
	 * get_wt().
	 */
	static void set_wt(ale_real _wt) {
		wt = _wt;
	}

	/*
	 * Get the weight value wt.
	 */
	static ale_real get_wt() {
		return wt;
	}

	/*
	 * Query the rx_show flag set by render_init().
	 */
	static int is_rx_show() {
		return rx_show;
	}

	/*
	 * Number of exclusion regions.
	 */
	static unsigned int get_rx_count() {
		return rx_count;
	}

	/*
	 * Read-only view of the exclusion region table.
	 */
	static const exclusion *get_rx_parameters() {
		return rx_parameters;
	}
260 * Current rendering result.
263 virtual const image *get_image() const = 0;
266 * Result of rendering at the given frame.
269 const image *get_image(unsigned int n) {
270 sync(n);
272 if (n == (unsigned int) step_num)
273 return get_image();
275 n = step_num - n - 1;
277 assert (n < queue_size);
279 return queue[n];
283 * Extend the rendering queue.
286 void extend_queue(unsigned int n) {
288 * Increase the size of the queue, if necessary, to
289 * accommodate the given lag.
291 if (n > queue_size) {
292 unsigned int new_size = n;
293 queue = (image **) realloc(queue, new_size * sizeof(image *));
294 assert(queue);
295 if (queue == NULL) {
296 fprintf(stderr, "\n\n*** VISE: Unable to allocate memory ***\n\n\n");
297 exit(1);
299 memset(queue + queue_size, 0, (new_size - queue_size) * sizeof(image *));
300 queue_size = new_size;
	/*
	 * Definition map.  Unit-depth image whose pixels are nonzero where
	 * the image is defined.
	 */
	virtual const image *get_defined() const = 0;
312 * Sync.
315 virtual void sync(int n) {
316 assert (step_num >= -1);
317 for (int i = step_num + 1; i <= n; i++) {
318 if (queue_size > 0 && step_num >= 0) {
320 * Shift the current queue so that the new head remains at the
321 * zero index. There are more time-efficient ways to handle
322 * queues, but the benefits are not clear in this case.
324 delete queue[queue_size - 1];
325 for (int i = queue_size - 1; i > 0; i--) {
326 queue[i] = queue[i - 1];
328 queue[0] = get_image()->clone("Render queue clone");
331 step_num++;
332 step();
	/*
	 * Perform any final rendering steps.  Return a non-zero value if
	 * anything changed.  The base implementation does nothing.
	 */
	virtual int sync() {
		return 0;
	}
	/*
	 * Set point rendering bounds, if possible.  The base implementation
	 * aborts: subclasses that support point rendering must override.
	 *
	 * NOTE(review): in these three stubs assert(0) precedes the
	 * fprintf/exit path, so in debug builds the error message is never
	 * printed -- the assert aborts first.  Only NDEBUG builds reach the
	 * message and exit(1).
	 */
	virtual void init_point_renderer(unsigned int h, unsigned int w, unsigned int d) {
		assert(0);
		fprintf(stderr, "Error: init_point_renderer() not supported by this renderer\n");
		exit(1);
	}

	/*
	 * Point render.  Base implementation aborts; see note above.
	 */
	virtual void point_render(unsigned int i, unsigned int j, unsigned int f, transformation t) {
		assert(0);
		fprintf(stderr, "Error: point_render() not supported by this renderer\n");
		exit(1);
	}

	/*
	 * Finish point rendering.  Base implementation aborts; see note above.
	 */
	virtual void finish_point_rendering() {
		assert(0);
		fprintf(stderr, "Error: finish_point_rendering() not supported by this renderer\n");
		exit(1);
	}
375 virtual ~render() {
376 directory[entry_number] = NULL;
	/*
	 * This renderer's index in the global directory.
	 */
	int entry() {
		return entry_number;
	}

	/*
	 * Release subclass-owned resources; called by free_entry() before
	 * the renderer is deleted.
	 */
	virtual void free_memory() = 0;
385 static void free_entry(int n) {
386 if (directory[n] != NULL) {
387 directory[n]->free_memory();
388 delete directory[n];
389 directory[n] = NULL;
393 static void free_all_memory() {
394 for (int i = 0; i < ACTIVE_RENDERER_COUNT; i++)
395 free_entry(i);
397 directory_length = 0;
	/*
	 * Reset global renderer state by releasing all registered renderers.
	 */
	static void reset() {
		free_all_memory();
	}
405 #endif