render: Get rid of RENDER_LINE_* in favor of similar TABLE_STROKE_*.
src/output/render.c
1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2009, 2010, 2011, 2013, 2014, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17 #include <config.h>
19 #include <math.h>
20 #include <stdio.h>
21 #include <stdint.h>
22 #include <stdlib.h>
23 #include <string.h>
25 #include "libpspp/assertion.h"
26 #include "libpspp/hash-functions.h"
27 #include "libpspp/hmap.h"
28 #include "libpspp/pool.h"
29 #include "output/pivot-output.h"
30 #include "output/pivot-table.h"
31 #include "output/render.h"
32 #include "output/table.h"
34 #include "gl/minmax.h"
35 #include "gl/xalloc.h"
37 #include "gettext.h"
38 #define _(msgid) gettext (msgid)
40 /* This file uses TABLE_HORZ and TABLE_VERT enough to warrant abbreviating. */
41 #define H TABLE_HORZ
42 #define V TABLE_VERT
44 /* A layout for rendering a specific table on a specific device.
46 May represent the layout of an entire table presented to
47 render_page_create(), or a rectangular subregion of a table broken out using
48 render_break_next() to allow a table to be broken across multiple pages.
50 A page's size is not limited to the size passed in as part of render_params.
51 render_pager breaks a render_page into smaller render_pages that will fit in
52 the available space. */
53 struct render_page
55 const struct render_params *params; /* Parameters of the target device. */
56 struct table *table; /* Table rendered. */
57 int ref_cnt;
59 /* Region of 'table' to render.
61 The horizontal cells rendered are the leftmost h[H][0], then
62 r[H][0] through r[H][1], exclusive, then the rightmost h[H][1].
64 The vertical cells rendered are the topmost h[V][0], then r[V][0]
65 through r[V][1], exclusive, then the bottommost h[V][1].
67 n[H] = h[H][0] + (r[H][1] - r[H][0]) + h[H][1]
68 n[V] = h[V][0] + (r[V][1] - r[V][0]) + h[V][1]
70 int h[TABLE_N_AXES][2];
71 int r[TABLE_N_AXES][2];
72 int n[TABLE_N_AXES];
74 /* "Cell positions".
76 cp[H] represents x positions within the table.
77 cp[H][0] = 0.
78 cp[H][1] = the width of the leftmost vertical rule.
79 cp[H][2] = cp[H][1] + the width of the leftmost column.
80 cp[H][3] = cp[H][2] + the width of the second-from-left vertical rule.
81 and so on:
82 cp[H][2 * n[H]] = x position of the rightmost vertical rule.
83 cp[H][2 * n[H] + 1] = total table width including all rules.
85 Similarly, cp[V] represents y positions within the table.
86 cp[V][0] = 0.
87 cp[V][1] = the height of the topmost horizontal rule.
88 cp[V][2] = cp[V][1] + the height of the topmost row.
89 cp[V][3] = cp[V][2] + the height of the second-from-top horizontal rule.
90 and so on:
91 cp[V][2 * n[V]] = y position of the bottommost horizontal rule.
92 cp[V][2 * n[V] + 1] = total table height including all rules.
94 Rules and columns can have width or height 0, in which case consecutive
95 values in this array are equal. */
96 int *cp[TABLE_N_AXES];
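   /* For example (illustrative values only): a table with two columns of
      widths 10 and 20, with 1-pixel rules on the left, in the middle, and on
      the right, would have

          cp[H] = { 0, 1, 11, 12, 32, 33 }

      so that column 0 occupies x positions 1 through 11 and column 1
      occupies 12 through 32. */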
98 /* render_break_next() can break a table such that some cells are not fully
99 contained within a render_page. This will happen if a cell is too wide
 100 or too tall to fit on a single page, or if a cell spans multiple rows or
101 columns and the page only includes some of those rows or columns.
 103 This hash table contains "struct render_overflow"s that represent each
104 such cell that doesn't completely fit on this page.
106 Each overflow cell borders at least one header edge of the table and may
107 border more. (A single table cell that is so large that it fills the
108 entire page can overflow on all four sides!) */
109 struct hmap overflows;
111 /* If a single column (or row) is too wide (or tall) to fit on a page
112 reasonably, then render_break_next() will split a single row or column
113 across multiple render_pages. This member indicates when this has
114 happened:
116 is_edge_cutoff[H][0] is true if pixels have been cut off the left side
117 of the leftmost column in this page, and false otherwise.
119 is_edge_cutoff[H][1] is true if pixels have been cut off the right side
120 of the rightmost column in this page, and false otherwise.
122 is_edge_cutoff[V][0] and is_edge_cutoff[V][1] are similar for the top
123 and bottom of the table.
125 The effect of is_edge_cutoff is to prevent rules along the edge in
126 question from being rendered.
128 When is_edge_cutoff is true for a given edge, the 'overflows' hmap will
129 contain a node for each cell along that edge. */
130 bool is_edge_cutoff[TABLE_N_AXES][2];
132 /* If part of a joined cell would be cut off by breaking a table along
133 'axis' at the rule with offset 'z' (where 0 <= z <= n[axis]), then
134 join_crossing[axis][z] is the thickness of the rule that would be cut
135 off.
137 This is used to know to allocate extra space for breaking at such a
138 position, so that part of the cell's content is not lost.
140 This affects breaking a table only when headers are present. When
141 headers are not present, the rule's thickness is used for cell content,
142 so no part of the cell's content is lost (and in fact it is duplicated
143 across both pages). */
144 int *join_crossing[TABLE_N_AXES];
147 static struct render_page *render_page_create (const struct render_params *,
148 struct table *, int min_width);
150 struct render_page *render_page_ref (const struct render_page *page_);
151 static void render_page_unref (struct render_page *);
153 /* Returns the offset in struct render_page's cp[axis] array of the rule with
154 index RULE_IDX. That is, if RULE_IDX is 0, then the offset is that of the
155 leftmost or topmost rule; if RULE_IDX is 1, then the offset is that of the
156 next rule to the right (or below); and so on. */
157 static int
158 rule_ofs (int rule_idx)
160 return rule_idx * 2;
163 /* Returns the offset in struct render_page's cp[axis] array of the rule with
164 index RULE_IDX_R, which counts from the right side (or bottom) of the page
165 left (or up), according to whether AXIS is H or V, respectively. That is,
166 if RULE_IDX_R is 0, then the offset is that of the rightmost or bottommost
 167 rule; if RULE_IDX_R is 1, then the offset is that of the next rule to the left
168 (or above); and so on. */
169 static int
170 rule_ofs_r (const struct render_page *page, int axis, int rule_idx_r)
172 return (page->n[axis] - rule_idx_r) * 2;
175 /* Returns the offset in struct render_page's cp[axis] array of the cell with
176 index CELL_IDX. That is, if CELL_IDX is 0, then the offset is that of the
177 leftmost or topmost cell; if CELL_IDX is 1, then the offset is that of the
178 next cell to the right (or below); and so on. */
179 static int
180 cell_ofs (int cell_idx)
182 return cell_idx * 2 + 1;
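/* Together, rule_ofs() and cell_ofs() describe how rules and cells interleave
   in cp[]: rule 0 is at offset 0, cell 0 at offset 1, rule 1 at offset 2,
   cell 1 at offset 3, and so on, up to rule n at offset 2n. */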
185 /* Returns the width of PAGE along AXIS from OFS0 to OFS1, exclusive. */
186 static int
187 axis_width (const struct render_page *page, int axis, int ofs0, int ofs1)
189 return page->cp[axis][ofs1] - page->cp[axis][ofs0];
192 /* Returns the total width of PAGE along AXIS. */
193 static int
194 table_width (const struct render_page *page, int axis)
196 return page->cp[axis][2 * page->n[axis] + 1];
199 /* Returns the width of the headers in PAGE along AXIS. */
200 static int
201 headers_width (const struct render_page *page, int axis)
203 int h0 = page->h[axis][0];
204 int w0 = axis_width (page, axis, rule_ofs (0), cell_ofs (h0));
205 int n = page->n[axis];
206 int h1 = page->h[axis][1];
207 int w1 = axis_width (page, axis, rule_ofs_r (page, axis, h1), cell_ofs (n));
208 return w0 + w1;
211 /* Returns the width of cell X along AXIS in PAGE. */
212 static int
213 cell_width (const struct render_page *page, int axis, int x)
215 return axis_width (page, axis, cell_ofs (x), cell_ofs (x) + 1);
218 /* Returns the width of rule X along AXIS in PAGE. */
219 static int
220 rule_width (const struct render_page *page, int axis, int x)
222 return axis_width (page, axis, rule_ofs (x), rule_ofs (x) + 1);
 225 /* Returns the width of rule X along AXIS in PAGE, where X counts from the right side (or bottom) of the page. */
226 static int
227 rule_width_r (const struct render_page *page, int axis, int x)
229 int ofs = rule_ofs_r (page, axis, x);
230 return axis_width (page, axis, ofs, ofs + 1);
233 /* Returns the width of cells X0 through X1, exclusive, along AXIS in PAGE. */
234 static int
235 joined_width (const struct render_page *page, int axis, int x0, int x1)
237 return axis_width (page, axis, cell_ofs (x0), cell_ofs (x1) - 1);
240 /* Returns the width of the widest cell, excluding headers, along AXIS in
241 PAGE. */
242 static int
243 max_cell_width (const struct render_page *page, int axis)
245 int n = page->n[axis];
246 int x0 = page->h[axis][0];
247 int x1 = n - page->h[axis][1];
249 int max = 0;
250 for (int x = x0; x < x1; x++)
252 int w = cell_width (page, axis, x);
253 if (w > max)
254 max = w;
256 return max;
259 /* A cell that doesn't completely fit on the render_page. */
260 struct render_overflow
262 struct hmap_node node; /* In render_page's 'overflows' hmap. */
 264 /* Position of the cell on the page.
 266 d[H] is the leftmost column occupied by the cell.
 267 d[V] is the top row occupied by the cell.
 271 The cell in its original table might start at an earlier column or row.  This
 272 member reflects the cell's position in the current render_page, after
 273 trimming off any rows or columns due to page-breaking. */
274 int d[TABLE_N_AXES];
276 /* The space that has been trimmed off the cell:
278 overflow[H][0]: space trimmed off its left side.
279 overflow[H][1]: space trimmed off its right side.
280 overflow[V][0]: space trimmed off its top.
281 overflow[V][1]: space trimmed off its bottom.
283 During rendering, this information is used to position the rendered
284 portion of the cell within the available space.
286 When a cell is rendered, sometimes it is permitted to spill over into
287 space that is ordinarily reserved for rules. Either way, this space is
288 still included in overflow values.
290 Suppose, for example, that a cell that joins 2 columns has a width of 60
291 pixels and content "abcdef", that the 2 columns that it joins have
292 widths of 20 and 30 pixels, respectively, and that therefore the rule
293 between the two joined columns has a width of 10 (20 + 10 + 30 = 60).
294 It might render like this, if each character is 10x10, and showing a few
295 extra table cells for context:
297 +------+
298 |abcdef|
299 +--+---+
300 |gh|ijk|
301 +--+---+
303 If this render_page is broken at the rule that separates "gh" from
304 "ijk", then the page that contains the left side of the "abcdef" cell
305 will have overflow[H][1] of 10 + 30 = 40 for its portion of the cell,
306 and the page that contains the right side of the cell will have
307 overflow[H][0] of 20 + 10 = 30. The two resulting pages would look like
308 this:
311 +---
312 |abc
313 +--+
314 |gh|
315 +--+
317 and:
319 ----+
320 cdef|
321 +---+
322 |ijk|
323 +---+
325 int overflow[TABLE_N_AXES][2];
 328 /* Returns a hash value for (X,Y). */
329 static unsigned int
330 hash_cell (int x, int y)
332 return hash_int (x + (y << 16), 0);
335 /* Searches PAGE's set of render_overflow for one whose top-left cell is
336 (X,Y). Returns it, if there is one, otherwise a null pointer. */
337 static const struct render_overflow *
338 find_overflow (const struct render_page *page, int x, int y)
340 if (!hmap_is_empty (&page->overflows))
342 const struct render_overflow *of;
344 HMAP_FOR_EACH_WITH_HASH (of, struct render_overflow, node,
345 hash_cell (x, y), &page->overflows)
346 if (x == of->d[H] && y == of->d[V])
347 return of;
350 return NULL;
353 /* Row or column dimensions. Used to figure the size of a table in
354 render_page_create() and discarded after that. */
355 struct render_row
 357 /* Width without considering cells that span more than one column (or
 358 row). */
359 int unspanned;
361 /* Width taking spanned rows (or columns) into consideration. */
362 int width;
365 /* Modifies the 'width' members of the N elements of ROWS so that their sum,
366 when added to rule widths RULES[1] through RULES[N - 1] inclusive, is at
367 least WIDTH. */
368 static void
369 distribute_spanned_width (int width,
370 struct render_row *rows, const int *rules, int n)
372 /* Sum up the unspanned widths of the N rows for use as weights. */
373 int total_unspanned = 0;
374 for (int x = 0; x < n; x++)
375 total_unspanned += rows[x].unspanned;
376 for (int x = 0; x < n - 1; x++)
377 total_unspanned += rules[x + 1];
378 if (total_unspanned >= width)
379 return;
381 /* The algorithm used here is based on the following description from HTML 4:
383 For cells that span multiple columns, a simple approach consists of
384 apportioning the min/max widths evenly to each of the constituent
385 columns. A slightly more complex approach is to use the min/max
386 widths of unspanned cells to weight how spanned widths are
387 apportioned. Experiments suggest that a blend of the two approaches
388 gives good results for a wide range of tables.
390 We blend the two approaches half-and-half, except that we cannot use the
391 unspanned weights when 'total_unspanned' is 0 (because that would cause a
392 division by zero).
394 The calculation we want to do is this:
396 w0 = width / n
397 w1 = width * (column's unspanned width) / (total unspanned width)
398 (column's width) = (w0 + w1) / 2
400 We implement it as a precise calculation in integers by multiplying w0 and
401 w1 by the common denominator of all three calculations (d), dividing that
402 out in the column width calculation, and then keeping the remainder for
403 the next iteration.
 405 (For weighting purposes, we actually take a column's unspanned width to be
 406 twice its measured unspanned width, plus the width of the rule on its left,
 407 plus the width of the rule on its right. That way each rule contributes to
 408 both the cell on its left and the cell on its right.)
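
   A worked example, with numbers invented for illustration: suppose
   width = 30 must be distributed over n = 2 columns whose unspanned widths
   are 5 and 10, separated by a 1-pixel rule, so total_unspanned = 16.  The
   even split gives 15 per column; the weighted split, using the
   doubled-plus-rules weights 11 and 21, gives 30*11/32 ~= 10.3 and
   30*21/32 ~= 19.7.  Blending the two and carrying remainders yields final
   widths 13 and 17, and 13 + 1 + 17 = 31 >= 30 as required.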
410 long long int d0 = n;
411 long long int d1 = 2LL * MAX (total_unspanned, 1);
412 long long int d = d0 * d1;
413 if (total_unspanned > 0)
414 d *= 2;
415 long long int w = d / 2;
416 for (int x = 0; x < n; x++)
418 w += width * d1;
419 if (total_unspanned > 0)
421 long long int unspanned = rows[x].unspanned * 2LL;
422 if (x < n - 1)
423 unspanned += rules[x + 1];
424 if (x > 0)
425 unspanned += rules[x];
426 w += width * unspanned * d0;
429 rows[x].width = MAX (rows[x].width, w / d);
430 w -= rows[x].width * d;
434 /* Initializes PAGE->cp[AXIS] from the row widths in ROWS and the rule widths
435 in RULES. */
436 static void
437 accumulate_row_widths (const struct render_page *page, enum table_axis axis,
438 const struct render_row *rows, const int *rules)
440 int n = page->n[axis];
441 int *cp = page->cp[axis];
442 cp[0] = 0;
443 for (int z = 0; z < n; z++)
445 cp[1] = cp[0] + rules[z];
446 cp[2] = cp[1] + rows[z].width;
447 cp += 2;
449 cp[1] = cp[0] + rules[n];
452 /* Returns the sum of widths of the N ROWS and N+1 RULES. */
453 static int
454 calculate_table_width (int n, const struct render_row *rows, int *rules)
456 int width = 0;
457 for (int x = 0; x < n; x++)
458 width += rows[x].width;
459 for (int x = 0; x <= n; x++)
460 width += rules[x];
462 return width;
465 /* Rendering utility functions. */
467 /* Returns the width of the rule in TABLE that is at offset Z along axis A, if
468 rendered with PARAMS. */
469 static int
470 measure_rule (const struct render_params *params, const struct table *table,
471 enum table_axis a, int z)
473 enum table_axis b = !a;
475 /* Determine all types of rules that are present, as a bitmap in 'rules'
476 where rule type 't' is present if bit 2**t is set. */
477 struct cell_color color;
478 unsigned int rules = 0;
479 int d[TABLE_N_AXES];
480 d[a] = z;
481 for (d[b] = 0; d[b] < table->n[b]; d[b]++)
482 rules |= 1u << table_get_rule (table, a, d[H], d[V], &color);
484 /* Turn off TABLE_STROKE_NONE because it has width 0 and we needn't bother.
485 However, if the device doesn't support margins, make sure that there is at
486 least a small gap between cells (but we don't need any at the left or
487 right edge of the table). */
488 if (rules & (1u << TABLE_STROKE_NONE))
490 rules &= ~(1u << TABLE_STROKE_NONE);
491 if (z > 0 && z < table->n[a] && !params->supports_margins && a == H)
492 rules |= 1u << TABLE_STROKE_SOLID;
495 /* Calculate maximum width of the rules that are present. */
496 int width = 0;
497 for (size_t i = 0; i < TABLE_N_STROKES; i++)
498 if (rules & (1u << i))
499 width = MAX (width, params->line_widths[i]);
500 return width;
503 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
504 space for rendering a table with dimensions given in N. The caller must
505 initialize most of the members itself. */
506 static struct render_page *
507 render_page_allocate__ (const struct render_params *params,
508 struct table *table, int n[TABLE_N_AXES])
510 struct render_page *page = xmalloc (sizeof *page);
511 page->params = params;
512 page->table = table;
513 page->ref_cnt = 1;
514 page->n[H] = n[H];
515 page->n[V] = n[V];
517 for (int i = 0; i < TABLE_N_AXES; i++)
 519 page->cp[i] = xcalloc (2 * n[i] + 2, sizeof *page->cp[i]);
 520 page->join_crossing[i] = xcalloc (n[i] + 1, sizeof *page->join_crossing[i]);
523 hmap_init (&page->overflows);
524 memset (page->is_edge_cutoff, 0, sizeof page->is_edge_cutoff);
526 return page;
529 /* Allocates and returns a new render_page using PARAMS and TABLE. Allocates
530 space for all of the members of the new page, but the caller must initialize
531 the 'cp' member itself. */
532 static struct render_page *
533 render_page_allocate (const struct render_params *params, struct table *table)
535 struct render_page *page = render_page_allocate__ (params, table, table->n);
536 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
538 page->h[a][0] = table->h[a][0];
539 page->h[a][1] = table->h[a][1];
540 page->r[a][0] = table->h[a][0];
541 page->r[a][1] = table->n[a] - table->h[a][1];
543 return page;
546 /* Allocates and returns a new render_page for PARAMS and TABLE, initializing
547 cp[H] in the new page from ROWS and RULES. The caller must still initialize
548 cp[V]. */
549 static struct render_page *
550 create_page_with_exact_widths (const struct render_params *params,
551 struct table *table,
552 const struct render_row *rows, int *rules)
554 struct render_page *page = render_page_allocate (params, table);
555 accumulate_row_widths (page, H, rows, rules);
556 return page;
559 /* Allocates and returns a new render_page for PARAMS and TABLE.
561 Initializes cp[H] in the new page by setting the width of each row 'i' to
 562 somewhere between the minimum cell width ROWS_MIN[i].width and the maximum
 563 ROWS_MAX[i].width. Sets the width of rules to those in RULES.
565 W_MIN is the sum of ROWS_MIN[].width.
567 W_MAX is the sum of ROWS_MAX[].width.
569 The caller must still initialize cp[V]. */
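/* For example (numbers invented for illustration): if ROWS_MIN has widths
   { 10, 18 } and ROWS_MAX has { 30, 38 }, with a 2-pixel rule between the
   columns and no edge rules, then W_MIN = 30 and W_MAX = 70.  On a device
   50 pixels wide, 'avail' is 20 and 'wanted' is 40, so each column receives
   half of its possible growth: the final widths are 20 and 28, and
   20 + 2 + 28 = 50 exactly fills the page, as the assert at the end of the
   function checks. */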
570 static struct render_page *
571 create_page_with_interpolated_widths (const struct render_params *params,
572 struct table *table,
573 const struct render_row *rows_min,
574 const struct render_row *rows_max,
575 int w_min, int w_max, const int *rules)
577 const int n = table->n[H];
578 const long long int avail = params->size[H] - w_min;
579 const long long int wanted = w_max - w_min;
581 assert (wanted > 0);
583 struct render_page *page = render_page_allocate (params, table);
585 int *cph = page->cp[H];
586 *cph = 0;
587 long long int w = wanted / 2;
588 for (int x = 0; x < n; x++)
590 w += avail * (rows_max[x].width - rows_min[x].width);
591 int extra = w / wanted;
592 w -= extra * wanted;
594 cph[1] = cph[0] + rules[x];
595 cph[2] = cph[1] + rows_min[x].width + extra;
596 cph += 2;
598 cph[1] = cph[0] + rules[n];
600 assert (page->cp[H][n * 2 + 1] == params->size[H]);
601 return page;
604 static void
605 set_join_crossings (struct render_page *page, enum table_axis axis,
606 const struct table_cell *cell, int *rules)
608 for (int z = cell->d[axis][0] + 1; z <= cell->d[axis][1] - 1; z++)
609 page->join_crossing[axis][z] = rules[z];
612 /* Maps a contiguous range of cells from a page to the underlying table along
 613 the horizontal or vertical dimension. */
614 struct map
616 int p0; /* First ordinate in the page. */
617 int t0; /* First ordinate in the table. */
618 int n; /* Number of ordinates in page and table. */
621 /* Initializes M to a mapping from PAGE to PAGE->table along axis A. The
622 mapping includes ordinate Z (in PAGE). */
623 static void
624 get_map (const struct render_page *page, enum table_axis a, int z,
625 struct map *m)
627 if (z < page->h[a][0])
629 m->p0 = 0;
630 m->t0 = 0;
631 m->n = page->h[a][0];
633 else if (z < page->n[a] - page->h[a][1])
635 m->p0 = page->h[a][0];
636 m->t0 = page->r[a][0];
637 m->n = page->r[a][1] - page->r[a][0];
639 else
641 m->p0 = page->n[a] - page->h[a][1];
642 m->t0 = page->table->n[a] - page->table->h[a][1];
643 m->n = page->h[a][1];
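/* An example of the mapping that get_map() computes (illustrative numbers):
   take a 10-column table with one leading and one trailing header column,
   from which a page was selected with page->h[H] = { 1, 1 },
   page->n[H] = 5, and page->r[H] = { 3, 6 }.  Then page column 0 maps to
   table column 0 (the leading header), page columns 1 through 3 map to
   table columns 3 through 5 (the selected body columns), and page column 4
   maps to table column 9 (the trailing header). */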
647 /* Initializes CELL with the contents of the table cell at column X and row Y
648 within PAGE. When CELL is no longer needed, the caller is responsible for
649 freeing it by calling table_cell_free(CELL).
651 The caller must ensure that CELL is destroyed before TABLE is unref'ed.
653 This is equivalent to table_get_cell(), except X and Y are in terms of the
654 page's rows and columns rather than the underlying table's. */
655 static void
656 render_get_cell (const struct render_page *page, int x, int y,
657 struct table_cell *cell)
659 int d[TABLE_N_AXES] = { [H] = x, [V] = y };
660 struct map map[TABLE_N_AXES];
662 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
664 struct map *m = &map[a];
665 get_map (page, a, d[a], m);
666 d[a] += m->t0 - m->p0;
668 table_get_cell (page->table, d[H], d[V], cell);
670 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
672 struct map *m = &map[a];
674 for (int i = 0; i < 2; i++)
675 cell->d[a][i] -= m->t0 - m->p0;
676 cell->d[a][0] = MAX (cell->d[a][0], m->p0);
677 cell->d[a][1] = MIN (cell->d[a][1], m->p0 + m->n);
681 /* Creates and returns a new render_page for rendering TABLE on a device
682 described by PARAMS.
684 The new render_page will be suitable for rendering on a device whose page
685 size is PARAMS->size, but the caller is responsible for actually breaking it
686 up to fit on such a device, using the render_break abstraction. */
687 static struct render_page *
688 render_page_create (const struct render_params *params, struct table *table,
689 int min_width)
691 enum { MIN, MAX };
693 int nc = table->n[H];
694 int nr = table->n[V];
696 /* Figure out rule widths. */
697 int *rules[TABLE_N_AXES];
698 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
700 int n = table->n[axis] + 1;
 702 rules[axis] = xnmalloc (n, sizeof *rules[axis]);
703 for (int z = 0; z < n; z++)
704 rules[axis][z] = measure_rule (params, table, axis, z);
707 /* Calculate minimum and maximum widths of cells that do not
708 span multiple columns. */
709 struct render_row *columns[2];
710 for (int i = 0; i < 2; i++)
711 columns[i] = xcalloc (nc, sizeof *columns[i]);
712 for (int y = 0; y < nr; y++)
713 for (int x = 0; x < nc;)
715 struct table_cell cell;
717 table_get_cell (table, x, y, &cell);
718 if (y == cell.d[V][0])
720 if (table_cell_colspan (&cell) == 1)
722 int w[2];
723 params->ops->measure_cell_width (params->aux, &cell,
724 &w[MIN], &w[MAX]);
725 for (int i = 0; i < 2; i++)
726 if (columns[i][x].unspanned < w[i])
727 columns[i][x].unspanned = w[i];
730 x = cell.d[H][1];
733 /* Distribute widths of spanned columns. */
734 for (int i = 0; i < 2; i++)
735 for (int x = 0; x < nc; x++)
736 columns[i][x].width = columns[i][x].unspanned;
737 for (int y = 0; y < nr; y++)
738 for (int x = 0; x < nc;)
740 struct table_cell cell;
742 table_get_cell (table, x, y, &cell);
743 if (y == cell.d[V][0] && table_cell_colspan (&cell) > 1)
745 int w[2];
747 params->ops->measure_cell_width (params->aux, &cell,
748 &w[MIN], &w[MAX]);
749 for (int i = 0; i < 2; i++)
750 distribute_spanned_width (w[i], &columns[i][cell.d[H][0]],
751 rules[H], table_cell_colspan (&cell));
753 x = cell.d[H][1];
755 if (min_width > 0)
756 for (int i = 0; i < 2; i++)
757 distribute_spanned_width (min_width, &columns[i][0], rules[H], nc);
759 /* In pathological cases, spans can cause the minimum width of a column to
760 exceed the maximum width. This bollixes our interpolation algorithm
761 later, so fix it up. */
762 for (int i = 0; i < nc; i++)
763 if (columns[MIN][i].width > columns[MAX][i].width)
764 columns[MAX][i].width = columns[MIN][i].width;
766 /* Decide final column widths. */
767 int table_widths[2];
768 for (int i = 0; i < 2; i++)
769 table_widths[i] = calculate_table_width (table->n[H],
770 columns[i], rules[H]);
772 struct render_page *page;
773 if (table_widths[MAX] <= params->size[H])
775 /* Fits even with maximum widths. Use them. */
776 page = create_page_with_exact_widths (params, table, columns[MAX],
777 rules[H]);
779 else if (table_widths[MIN] <= params->size[H])
781 /* Fits with minimum widths, so distribute the leftover space. */
782 page = create_page_with_interpolated_widths (
783 params, table, columns[MIN], columns[MAX],
784 table_widths[MIN], table_widths[MAX], rules[H]);
786 else
788 /* Doesn't fit even with minimum widths. Assign minimums for now, and
789 later we can break it horizontally into multiple pages. */
790 page = create_page_with_exact_widths (params, table, columns[MIN],
791 rules[H]);
794 /* Calculate heights of cells that do not span multiple rows. */
795 struct render_row *rows = XCALLOC (nr, struct render_row);
796 for (int y = 0; y < nr; y++)
797 for (int x = 0; x < nc;)
799 struct render_row *r = &rows[y];
800 struct table_cell cell;
802 render_get_cell (page, x, y, &cell);
803 if (y == cell.d[V][0])
805 if (table_cell_rowspan (&cell) == 1)
807 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
808 int h = params->ops->measure_cell_height (params->aux,
809 &cell, w);
810 if (h > r->unspanned)
811 r->unspanned = r->width = h;
813 else
814 set_join_crossings (page, V, &cell, rules[V]);
816 if (table_cell_colspan (&cell) > 1)
817 set_join_crossings (page, H, &cell, rules[H]);
819 x = cell.d[H][1];
821 for (int i = 0; i < 2; i++)
822 free (columns[i]);
824 /* Distribute heights of spanned rows. */
825 for (int y = 0; y < nr; y++)
826 for (int x = 0; x < nc;)
828 struct table_cell cell;
830 render_get_cell (page, x, y, &cell);
831 if (y == cell.d[V][0] && table_cell_rowspan (&cell) > 1)
833 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
834 int h = params->ops->measure_cell_height (params->aux, &cell, w);
835 distribute_spanned_width (h, &rows[cell.d[V][0]], rules[V],
836 table_cell_rowspan (&cell));
838 x = cell.d[H][1];
841 /* Decide final row heights. */
842 accumulate_row_widths (page, V, rows, rules[V]);
843 free (rows);
845 /* Measure headers. If they are "too big", get rid of them. */
846 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
848 int hw = headers_width (page, axis);
849 if (hw * 2 >= page->params->size[axis]
850 || hw + max_cell_width (page, axis) > page->params->size[axis])
852 page->h[axis][0] = page->h[axis][1] = 0;
853 page->r[axis][0] = 0;
854 page->r[axis][1] = page->n[axis];
858 free (rules[H]);
859 free (rules[V]);
861 return page;
864 /* Increases PAGE's reference count. */
865 struct render_page *
866 render_page_ref (const struct render_page *page_)
868 struct render_page *page = CONST_CAST (struct render_page *, page_);
869 page->ref_cnt++;
870 return page;
873 /* Decreases PAGE's reference count and destroys PAGE if this causes the
874 reference count to fall to zero. */
875 static void
876 render_page_unref (struct render_page *page)
878 if (page != NULL && --page->ref_cnt == 0)
880 struct render_overflow *overflow, *next;
881 HMAP_FOR_EACH_SAFE (overflow, next, struct render_overflow, node,
882 &page->overflows)
883 free (overflow);
884 hmap_destroy (&page->overflows);
886 table_unref (page->table);
888 for (int i = 0; i < TABLE_N_AXES; ++i)
890 free (page->join_crossing[i]);
891 free (page->cp[i]);
894 free (page);
898 /* Returns the size of PAGE along AXIS. (This might be larger than the page
899 size specified in the parameters passed to render_page_create(). Use a
900 render_break to break up a render_page into page-sized chunks.) */
901 static int
902 render_page_get_size (const struct render_page *page, enum table_axis axis)
904 return page->cp[axis][page->n[axis] * 2 + 1];
907 static int
908 render_page_get_best_breakpoint (const struct render_page *page, int height)
910 /* If there's no room for at least the top row and the rules above and below
911 it, don't include any of the table. */
912 if (page->cp[V][3] > height)
913 return 0;
915 /* Otherwise include as many rows and rules as we can. */
916 for (int y = 5; y <= 2 * page->n[V] + 1; y += 2)
917 if (page->cp[V][y] > height)
918 return page->cp[V][y - 2];
919 return height;
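/* For example (illustrative values): if cp[V] = { 0, 1, 11, 12, 22, 23 },
   that is, two 10-pixel rows separated by 1-pixel rules, and HEIGHT is 15,
   then the first row fits but the second does not, so
   render_page_get_best_breakpoint() returns cp[V][3] = 12: the bottom of
   the first row plus the rule beneath it. */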
922 /* Drawing render_pages. */
924 /* This is like table_get_rule() except:
 926 - D is in terms of the page's rows and columns rather than the underlying
927 table's.
929 - The result is in the form of a table_stroke. */
930 static enum table_stroke
931 get_rule (const struct render_page *page, enum table_axis axis,
932 const int d_[TABLE_N_AXES], struct cell_color *color)
934 int d[TABLE_N_AXES] = { d_[0] / 2, d_[1] / 2 };
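  /* When the requested rule lies on the boundary between a header and the
     body, the same rule borders both the header and the body region that
     this page was selected from, so we look up the rule in both places in
     the underlying table and combine the two strokes.  'd2', if it ends up
     nonnegative, is the second position to look up. */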
935 int d2 = -1;
937 enum table_axis a = axis;
938 if (d[a] < page->h[a][0])
939 /* Nothing to do */;
940 else if (d[a] <= page->n[a] - page->h[a][1])
942 if (page->h[a][0] && d[a] == page->h[a][0])
943 d2 = page->h[a][0];
944 else if (page->h[a][1] && d[a] == page->n[a] - page->h[a][1])
945 d2 = page->table->n[a] - page->h[a][1];
946 d[a] += page->r[a][0] - page->h[a][0];
948 else
949 d[a] += ((page->table->n[a] - page->table->h[a][1])
950 - (page->n[a] - page->h[a][1]));
952 enum table_axis b = !axis;
953 struct map m;
954 get_map (page, b, d[b], &m);
955 d[b] += m.t0 - m.p0;
957 int r = table_get_rule (page->table, axis, d[H], d[V], color);
958 if (d2 >= 0)
960 d[a] = d2;
961 int r2 = table_get_rule (page->table, axis, d[H], d[V], color);
962 r = table_stroke_combine (r, r2);
964 return r;
967 static bool
968 is_rule (int z)
970 return !(z & 1);
973 bool
974 render_direction_rtl (void)
976 /* TRANSLATORS: Do not translate this string. If the script of your language
977 reads from right to left (eg Persian, Arabic, Hebrew etc), then replace
978 this string with "output-direction-rtl". Otherwise either leave it
979 untranslated or copy it verbatim. */
980 const char *dir = _("output-direction-ltr");
981 if (0 == strcmp ("output-direction-rtl", dir))
982 return true;
984 if (0 != strcmp ("output-direction-ltr", dir))
985 fprintf (stderr, "This localisation has been incorrectly translated. "
986 "Complain to the translator.\n");
988 return false;
991 static void
992 render_rule (const struct render_page *page, const int ofs[TABLE_N_AXES],
993 const int d[TABLE_N_AXES])
995 enum table_stroke styles[TABLE_N_AXES][2];
996 struct cell_color colors[TABLE_N_AXES][2];
998 for (enum table_axis a = 0; a < TABLE_N_AXES; a++)
1000 enum table_axis b = !a;
1002 styles[a][0] = styles[a][1] = TABLE_STROKE_NONE;
1004 if (!is_rule (d[a])
1005 || (page->is_edge_cutoff[a][0] && d[a] == 0)
1006 || (page->is_edge_cutoff[a][1] && d[a] == page->n[a] * 2))
1007 continue;
1009 if (is_rule (d[b]))
1011 if (d[b] > 0)
1013 int e[TABLE_N_AXES];
1014 e[H] = d[H];
1015 e[V] = d[V];
1016 e[b]--;
1017 styles[a][0] = get_rule (page, a, e, &colors[a][0]);
1020 if (d[b] / 2 < page->n[b])
1021 styles[a][1] = get_rule (page, a, d, &colors[a][1]);
1023 else
1025 styles[a][0] = styles[a][1] = get_rule (page, a, d, &colors[a][0]);
1026 colors[a][1] = colors[a][0];
1030 if (styles[H][0] != TABLE_STROKE_NONE || styles[H][1] != TABLE_STROKE_NONE
1031 || styles[V][0] != TABLE_STROKE_NONE || styles[V][1] != TABLE_STROKE_NONE)
1033 int bb[TABLE_N_AXES][2];
1035 bb[H][0] = ofs[H] + page->cp[H][d[H]];
1036 bb[H][1] = ofs[H] + page->cp[H][d[H] + 1];
1037 if (page->params->rtl)
1039 int temp = bb[H][0];
1040 bb[H][0] = render_page_get_size (page, H) - bb[H][1];
1041 bb[H][1] = render_page_get_size (page, H) - temp;
1043 bb[V][0] = ofs[V] + page->cp[V][d[V]];
1044 bb[V][1] = ofs[V] + page->cp[V][d[V] + 1];
1045 page->params->ops->draw_line (page->params->aux, bb, styles, colors);
1049 static void
1050 render_cell (const struct render_page *page, const int ofs[TABLE_N_AXES],
1051 const struct table_cell *cell)
1053 int bb[TABLE_N_AXES][2];
1054 int clip[TABLE_N_AXES][2];
1056 bb[H][0] = clip[H][0] = ofs[H] + page->cp[H][cell->d[H][0] * 2 + 1];
1057 bb[H][1] = clip[H][1] = ofs[H] + page->cp[H][cell->d[H][1] * 2];
1058 if (page->params->rtl)
1060 int temp = bb[H][0];
1061 bb[H][0] = clip[H][0] = render_page_get_size (page, H) - bb[H][1];
1062 bb[H][1] = clip[H][1] = render_page_get_size (page, H) - temp;
1064 bb[V][0] = clip[V][0] = ofs[V] + page->cp[V][cell->d[V][0] * 2 + 1];
1065 bb[V][1] = clip[V][1] = ofs[V] + page->cp[V][cell->d[V][1] * 2];
1067 enum table_valign valign = cell->cell_style->valign;
1068 int valign_offset = 0;
1069 if (valign != TABLE_VALIGN_TOP)
1071 int height = page->params->ops->measure_cell_height (
1072 page->params->aux, cell, bb[H][1] - bb[H][0]);
1073 int extra = bb[V][1] - bb[V][0] - height;
1074 if (extra > 0)
1076 if (valign == TABLE_VALIGN_CENTER)
1077 extra /= 2;
1078 valign_offset += extra;
1082 const struct render_overflow *of = find_overflow (
1083 page, cell->d[H][0], cell->d[V][0]);
1084 if (of)
1085 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1087 if (of->overflow[axis][0])
1089 bb[axis][0] -= of->overflow[axis][0];
1090 if (cell->d[axis][0] == 0 && !page->is_edge_cutoff[axis][0])
1091 clip[axis][0] = ofs[axis] + page->cp[axis][cell->d[axis][0] * 2];
1093 if (of->overflow[axis][1])
1095 bb[axis][1] += of->overflow[axis][1];
1096 if (cell->d[axis][1] == page->n[axis]
1097 && !page->is_edge_cutoff[axis][1])
1098 clip[axis][1] = ofs[axis] + page->cp[axis][cell->d[axis][1] * 2
1099 + 1];
1103 int spill[TABLE_N_AXES][2];
1104 for (enum table_axis axis = 0; axis < TABLE_N_AXES; axis++)
1106 spill[axis][0] = rule_width (page, axis, cell->d[axis][0]) / 2;
1107 spill[axis][1] = rule_width (page, axis, cell->d[axis][1]) / 2;
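  /* Choose the 'color_idx' passed to draw_cell: 0 for cells whose top row
     lies in the top or bottom header region, otherwise alternating 0 and 1
     for successive body rows (so that, for example, a driver can shade
     alternate rows). */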
1110 int color_idx = (cell->d[V][0] < page->h[V][0]
 1111 || page->n[V] - (cell->d[V][0] + 1) < page->h[V][1]
 1112 ? 0
 1113 : (cell->d[V][0] - page->h[V][0]) & 1);
1114 page->params->ops->draw_cell (page->params->aux, cell, color_idx,
1115 bb, valign_offset, spill, clip);
1118 /* Draws the cells of PAGE indicated in BB. */
1119 static void
1120 render_page_draw_cells (const struct render_page *page,
1121 int ofs[TABLE_N_AXES], int bb[TABLE_N_AXES][2])
1123 for (int y = bb[V][0]; y < bb[V][1]; y++)
1124 for (int x = bb[H][0]; x < bb[H][1];)
1125 if (!is_rule (x) && !is_rule (y))
1127 struct table_cell cell;
1129 render_get_cell (page, x / 2, y / 2, &cell);
1130 if (y / 2 == bb[V][0] / 2 || y / 2 == cell.d[V][0])
1131 render_cell (page, ofs, &cell);
1132 x = rule_ofs (cell.d[H][1]);
1134 else
1135 x++;
1137 for (int y = bb[V][0]; y < bb[V][1]; y++)
1138 for (int x = bb[H][0]; x < bb[H][1]; x++)
1139 if (is_rule (x) || is_rule (y))
1141 int d[TABLE_N_AXES];
1142 d[H] = x;
1143 d[V] = y;
1144 render_rule (page, ofs, d);
1148 /* Renders PAGE, by calling the 'draw_line' and 'draw_cell' functions from the
1149 render_params provided to render_page_create(). */
1150 static void
1151 render_page_draw (const struct render_page *page, int ofs[TABLE_N_AXES])
1153 int bb[TABLE_N_AXES][2];
1155 bb[H][0] = 0;
1156 bb[H][1] = page->n[H] * 2 + 1;
1157 bb[V][0] = 0;
1158 bb[V][1] = page->n[V] * 2 + 1;
1160 render_page_draw_cells (page, ofs, bb);
1163 /* Returns the greatest value i, 0 <= i < n, such that cp[i] <= x0. */
1164 static int
1165 get_clip_min_extent (int x0, const int cp[], int n)
1167 int low = 0;
1168 int high = n;
1169 int best = 0;
1170 while (low < high)
1172 int middle = low + (high - low) / 2;
1174 if (cp[middle] <= x0)
1176 best = middle;
1177 low = middle + 1;
1179 else
1180 high = middle;
1183 return best;
1186 /* Returns the least value i, 0 <= i < n, such that cp[i] >= x1. */
1187 static int
1188 get_clip_max_extent (int x1, const int cp[], int n)
1190 int low = 0;
1191 int high = n;
1192 int best = n;
1193 while (low < high)
1195 int middle = low + (high - low) / 2;
1197 if (cp[middle] >= x1)
1198 best = high = middle;
1199 else
1200 low = middle + 1;
1203 while (best > 0 && cp[best - 1] == cp[best])
1204 best--;
1206 return best;
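/* For example, with cp[] = { 0, 1, 11, 12, 22, 23 } and n = 6, both
   get_clip_min_extent (12, cp, n) and get_clip_max_extent (12, cp, n)
   return 3, the offset at which cell 1 begins. */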
1209 /* Renders the cells of PAGE that intersect (X,Y)-(X+W,Y+H), by calling the
1210 'draw_line' and 'draw_cell' functions from the render_params provided to
1211 render_page_create(). */
1212 static void
1213 render_page_draw_region (const struct render_page *page,
1214 int ofs[TABLE_N_AXES], int clip[TABLE_N_AXES][2])
1216 int bb[TABLE_N_AXES][2];
1218 bb[H][0] = get_clip_min_extent (clip[H][0], page->cp[H], page->n[H] * 2 + 1);
1219 bb[H][1] = get_clip_max_extent (clip[H][1], page->cp[H], page->n[H] * 2 + 1);
1220 bb[V][0] = get_clip_min_extent (clip[V][0], page->cp[V], page->n[V] * 2 + 1);
1221 bb[V][1] = get_clip_max_extent (clip[V][1], page->cp[V], page->n[V] * 2 + 1);
1223 render_page_draw_cells (page, ofs, bb);
1226 /* Breaking up tables to fit on a page. */
1228 /* An iterator for breaking render_pages into smaller chunks. */
1229 struct render_break
1231 struct render_page *page; /* Page being broken up. */
1232 enum table_axis axis; /* Axis along which 'page' is being broken. */
1233 int z; /* Next cell along 'axis'. */
1234 int pixel; /* Pixel offset within cell 'z' (usually 0). */
1235 int hw; /* Width of headers of 'page' along 'axis'. */
1238 static int needed_size (const struct render_break *, int cell);
1239 static bool cell_is_breakable (const struct render_break *, int cell);
1240 static struct render_page *render_page_select (const struct render_page *,
1241 enum table_axis,
1242 int z0, int p0,
1243 int z1, int p1);
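/* A typical use of the render_break functions, sketched here for
   illustration only ('width' is a hypothetical space limit; the real driver
   is the render_pager code below):

       struct render_break b;
       render_break_init (&b, render_page_ref (page), H);
       while (render_break_has_next (&b))
         {
           struct render_page *slice = render_break_next (&b, width);
           if (!slice)
             break;            (too little space: ask the device for more)
           ... draw or further break 'slice' ...
           render_page_unref (slice);
         }
       render_break_destroy (&b);
*/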
1245 /* Initializes render_break B for breaking PAGE along AXIS.
1246 Takes ownership of PAGE. */
1247 static void
1248 render_break_init (struct render_break *b, struct render_page *page,
1249 enum table_axis axis)
1251 b->page = page;
1252 b->axis = axis;
1253 b->z = page->h[axis][0];
1254 b->pixel = 0;
1255 b->hw = headers_width (page, axis);
1258 /* Initializes B as a render_break structure for which
1259 render_break_has_next() always returns false. */
1260 static void
1261 render_break_init_empty (struct render_break *b)
1263 b->page = NULL;
1264 b->axis = TABLE_HORZ;
1265 b->z = 0;
1266 b->pixel = 0;
1267 b->hw = 0;
1270 /* Frees B and unrefs the render_page that it owns. */
1271 static void
1272 render_break_destroy (struct render_break *b)
1274 if (b != NULL)
1276 render_page_unref (b->page);
1277 b->page = NULL;
1281 /* Returns true if B still has cells that are yet to be returned,
1282 false if all of B's page has been processed. */
1283 static bool
1284 render_break_has_next (const struct render_break *b)
1286 const struct render_page *page = b->page;
1287 enum table_axis axis = b->axis;
1289 return page != NULL && b->z < page->n[axis] - page->h[axis][1];
1292 /* Returns a new render_page that is up to SIZE pixels wide along B's axis.
1293 Returns a null pointer if B has already been completely broken up, or if
1294 SIZE is too small to reasonably render any cells. The latter will never
1295 happen if SIZE is at least as large as the page size passed to
1296 render_page_create() along B's axis. */
1297 static struct render_page *
1298 render_break_next (struct render_break *b, int size)
1300 const struct render_page *page = b->page;
1301 enum table_axis axis = b->axis;
1302 struct render_page *subpage;
1304 if (!render_break_has_next (b))
1305 return NULL;
1307 int pixel = 0;
1308 int z;
1309 for (z = b->z; z < page->n[axis] - page->h[axis][1]; z++)
1311 int needed = needed_size (b, z + 1);
1312 if (needed > size)
1314 if (cell_is_breakable (b, z))
1316 /* If there is no right header and we render a partial cell on
1317 the right side of the body, then we omit the rightmost rule of
1318 the body. Otherwise the rendering is deceptive because it
1319 looks like the whole cell is present instead of a partial
1320 cell.
1322 This is similar to code for the left side in needed_size(). */
 1323 int rule_allowance = (page->h[axis][1]
 1324 ? 0
 1325 : rule_width (page, axis, z));
1327 /* The amount that, if we added cell 'z', the rendering would
1328 overfill the allocated 'size'. */
1329 int overhang = needed - size - rule_allowance;
1331 /* The width of cell 'z'. */
1332 int cell_size = cell_width (page, axis, z);
1334 /* The amount trimmed off the left side of 'z',
1335 and the amount left to render. */
1336 int cell_ofs = z == b->z ? b->pixel : 0;
1337 int cell_left = cell_size - cell_ofs;
1339 /* A small but visible width. */
1340 int em = page->params->font_size[axis];
1342 /* If some of the cell remains to render,
1343 and there would still be some of the cell left afterward,
1344 then partially render that much of the cell. */
1345 pixel = (cell_left && cell_left > overhang
1346 ? cell_left - overhang + cell_ofs
1347 : 0);
1349 /* If there would be only a tiny amount of the cell left after
1350 rendering it partially, reduce the amount rendered slightly
1351 to make the output look a little better. */
1352 if (pixel + em > cell_size)
1353 pixel = MAX (pixel - em, 0);
1355 /* If we're breaking vertically, then consider whether the cells
1356 being broken have a better internal breakpoint than the exact
1357 number of pixels available, which might look bad e.g. because
1358 it breaks in the middle of a line of text. */
1359 if (axis == TABLE_VERT && page->params->ops->adjust_break)
1360 for (int x = 0; x < page->n[H];)
1362 struct table_cell cell;
1364 render_get_cell (page, x, z, &cell);
1365 int w = joined_width (page, H, cell.d[H][0], cell.d[H][1]);
1366 int better_pixel = page->params->ops->adjust_break (
1367 page->params->aux, &cell, w, pixel);
1368 x = cell.d[H][1];
1370 if (better_pixel < pixel)
1372 if (better_pixel > (z == b->z ? b->pixel : 0))
1374 pixel = better_pixel;
1375 break;
1377 else if (better_pixel == 0 && z != b->z)
1379 pixel = 0;
1380 break;
1385 break;
1389 if (z == b->z && !pixel)
1390 return NULL;
1392 subpage = render_page_select (page, axis, b->z, b->pixel,
1393 pixel ? z + 1 : z,
1394 pixel ? cell_width (page, axis, z) - pixel
1395 : 0);
1396 b->z = z;
1397 b->pixel = pixel;
1398 return subpage;
1401 /* Returns the width that would be required along B's axis to render a page
1402 from B's current position up to but not including CELL. */
1403 static int
1404 needed_size (const struct render_break *b, int cell)
1406 const struct render_page *page = b->page;
1407 enum table_axis axis = b->axis;
1409 /* Width of left header not including its rightmost rule. */
1410 int size = axis_width (page, axis, 0, rule_ofs (page->h[axis][0]));
1412 /* If we have a pixel offset and there is no left header, then we omit the
1413 leftmost rule of the body. Otherwise the rendering is deceptive because
1414 it looks like the whole cell is present instead of a partial cell.
1416 Otherwise (if there are headers) we will be merging two rules: the
1417 rightmost rule in the header and the leftmost rule in the body. We assume
1418 that the width of a merged rule is the larger of the widths of either rule
 1419 individually. */
1420 if (b->pixel == 0 || page->h[axis][0])
1421 size += MAX (rule_width (page, axis, page->h[axis][0]),
1422 rule_width (page, axis, b->z));
1424 /* Width of body, minus any pixel offset in the leftmost cell. */
1425 size += joined_width (page, axis, b->z, cell) - b->pixel;
1427 /* Width of rightmost rule in body merged with leftmost rule in headers. */
1428 size += MAX (rule_width_r (page, axis, page->h[axis][1]),
1429 rule_width (page, axis, cell));
1431 /* Width of right header not including its leftmost rule. */
1432 size += axis_width (page, axis, rule_ofs_r (page, axis, page->h[axis][1]),
1433 rule_ofs_r (page, axis, 0));
1435 /* Join crossing. */
1436 if (page->h[axis][0] && page->h[axis][1])
1437 size += page->join_crossing[axis][b->z];
1439 return size;
1442 /* Returns true if CELL along B's axis may be broken across a page boundary.
1444 This is just a heuristic. Breaking cells across page boundaries can save
1445 space, but it looks ugly. */
1446 static bool
1447 cell_is_breakable (const struct render_break *b, int cell)
1449 const struct render_page *page = b->page;
1450 enum table_axis axis = b->axis;
1452 return cell_width (page, axis, cell) >= page->params->min_break[axis];
1455 /* render_pager. */
1457 struct render_pager
1459 const struct render_params *params;
1460 double scale;
1462 /* An array of "render_page"s to be rendered, in order, vertically. There
1463 may be up to 5 pages, for the pivot table's title, layers, body,
1464 captions, and footnotes. */
1465 struct render_page *pages[5];
1466 size_t n_pages;
1468 size_t cur_page;
1469 struct render_break x_break;
1470 struct render_break y_break;
1473 static void
1474 render_pager_add_table (struct render_pager *p, struct table *table,
1475 int min_width)
1477 if (table)
1478 p->pages[p->n_pages++] = render_page_create (p->params, table, min_width);
1481 static void
1482 render_pager_start_page (struct render_pager *p)
 1484 render_break_init (&p->x_break, render_page_ref (p->pages[p->cur_page++]),
 1485 H);
1486 render_break_init_empty (&p->y_break);
1489 /* Creates and returns a new render_pager for rendering PT on the device
1490 with the given PARAMS. */
1491 struct render_pager *
1492 render_pager_create (const struct render_params *params,
1493 const struct pivot_table *pt,
1494 const size_t *layer_indexes)
1496 if (!layer_indexes)
1497 layer_indexes = pt->current_layer;
1499 struct table *title, *layers, *body, *caption, *footnotes;
1500 pivot_output (pt, layer_indexes, params->printing,
1501 &title, &layers, &body, &caption, &footnotes, NULL, NULL);
1503 /* Figure out the width of the body of the table. Use this to determine the
1504 base scale. */
1505 struct render_page *body_page = render_page_create (params, body, 0);
1506 int body_width = table_width (body_page, H);
1507 double scale = 1.0;
1508 if (body_width > params->size[H])
1510 if (pt->look->shrink_to_fit[H] && params->ops->scale)
1511 scale = params->size[H] / (double) body_width;
1512 else
1514 struct render_break b;
1515 render_break_init (&b, render_page_ref (body_page), H);
1516 struct render_page *subpage
1517 = render_break_next (&b, params->size[H]);
1518 body_width = subpage ? subpage->cp[H][2 * subpage->n[H] + 1] : 0;
1519 render_page_unref (subpage);
1520 render_break_destroy (&b);
1524 /* Create the pager. */
1525 struct render_pager *p = xmalloc (sizeof *p);
1526 *p = (struct render_pager) { .params = params, .scale = scale };
1527 render_pager_add_table (p, title, body_width);
1528 render_pager_add_table (p, layers, body_width);
1529 p->pages[p->n_pages++] = body_page;
1530 render_pager_add_table (p, caption, 0);
1531 render_pager_add_table (p, footnotes, 0);
1532 assert (p->n_pages <= sizeof p->pages / sizeof *p->pages);
1534 /* If we're shrinking tables to fit the page length, then adjust the scale
1535 factor.
1537 XXX This will sometimes shrink more than needed, because adjusting the
1538 scale factor allows for cells to be "wider", which means that sometimes
1539 they won't break across as much vertical space, thus shrinking the table
1540 vertically more than the scale would imply. Shrinking only as much as
1541 necessary would require an iterative search. */
1542 if (pt->look->shrink_to_fit[V] && params->ops->scale)
1544 int total_height = 0;
1545 for (size_t i = 0; i < p->n_pages; i++)
1546 total_height += table_width (p->pages[i], V);
1547 if (total_height * p->scale >= params->size[V])
1548 p->scale *= params->size[V] / (double) total_height;
1551 render_pager_start_page (p);
1553 return p;
1556 /* Destroys P. */
1557 void
1558 render_pager_destroy (struct render_pager *p)
1560 if (p)
1562 render_break_destroy (&p->x_break);
1563 render_break_destroy (&p->y_break);
1564 for (size_t i = 0; i < p->n_pages; i++)
1565 render_page_unref (p->pages[i]);
1566 free (p);
1570 /* Returns true if P has content remaining to render, false if rendering is
1571 done. */
1572 bool
1573 render_pager_has_next (const struct render_pager *p_)
1575 struct render_pager *p = CONST_CAST (struct render_pager *, p_);
1577 while (!render_break_has_next (&p->y_break))
1579 render_break_destroy (&p->y_break);
1580 if (!render_break_has_next (&p->x_break))
1582 render_break_destroy (&p->x_break);
1583 if (p->cur_page >= p->n_pages)
1585 render_break_init_empty (&p->x_break);
1586 render_break_init_empty (&p->y_break);
1587 return false;
1589 render_pager_start_page (p);
1591 else
1592 render_break_init (
1593 &p->y_break, render_break_next (&p->x_break,
1594 p->params->size[H] / p->scale), V);
1596 return true;
1599 /* Draws a chunk of content from P to fit in a space that has vertical size
1600 SPACE and the horizontal size specified in the render_params passed to
1601 render_page_create(). Returns the amount of space actually used by the
1602 rendered chunk, which will be 0 if SPACE is too small to render anything or
1603 if no content remains (use render_pager_has_next() to distinguish these
1604 cases). */
 1605 int
 1606 render_pager_draw_next (struct render_pager *p, int space)
1608 if (p->scale != 1.0)
1610 p->params->ops->scale (p->params->aux, p->scale);
1611 space /= p->scale;
1614 int ofs[TABLE_N_AXES] = { 0, 0 };
1615 size_t start_page = SIZE_MAX;
1617 while (render_pager_has_next (p))
1619 if (start_page == p->cur_page)
1620 break;
1621 start_page = p->cur_page;
1623 struct render_page *page
1624 = render_break_next (&p->y_break, space - ofs[V]);
1625 if (!page)
1626 break;
1628 render_page_draw (page, ofs);
1629 ofs[V] += render_page_get_size (page, V);
1630 render_page_unref (page);
1633 if (p->scale != 1.0)
1634 ofs[V] *= p->scale;
1636 return ofs[V];
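/* A minimal sketch of how a driver might use the pager (assumes a filled-in
   struct render_params named 'params' and a pivot_table 'pt'; 'space_left'
   and the page-advancing step are device-specific and only hinted at):

       struct render_pager *p = render_pager_create (&params, pt, NULL);
       while (render_pager_has_next (p))
         {
           int used = render_pager_draw_next (p, space_left);
           if (used == 0)
             ... start a new device page and reset space_left ...
           else
             space_left -= used;
         }
       render_pager_destroy (p);
*/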
1639 /* Draws all of P's content. */
1640 void
1641 render_pager_draw (const struct render_pager *p)
1643 render_pager_draw_region (p, 0, 0, INT_MAX, INT_MAX);
1646 /* Draws the region of P's content that lies in the region (X,Y)-(X+W,Y+H).
1647 Some extra content might be drawn; the device should perform clipping as
1648 necessary. */
1649 void
1650 render_pager_draw_region (const struct render_pager *p,
1651 int x, int y, int w, int h)
1653 int ofs[TABLE_N_AXES] = { 0, 0 };
1654 int clip[TABLE_N_AXES][2];
1656 clip[H][0] = x;
1657 clip[H][1] = x + w;
1658 for (size_t i = 0; i < p->n_pages; i++)
1660 const struct render_page *page = p->pages[i];
1661 int size = render_page_get_size (page, V);
1663 clip[V][0] = MAX (y, ofs[V]) - ofs[V];
1664 clip[V][1] = MIN (y + h, ofs[V] + size) - ofs[V];
1665 if (clip[V][1] > clip[V][0])
1666 render_page_draw_region (page, ofs, clip);
1668 ofs[V] += size;
1672 /* Returns the size of P's content along AXIS; i.e. the content's width if AXIS
1673 is TABLE_HORZ and its length if AXIS is TABLE_VERT. */
 1674 int
 1675 render_pager_get_size (const struct render_pager *p, enum table_axis axis)
1677 int size = 0;
1679 for (size_t i = 0; i < p->n_pages; i++)
1681 int subsize = render_page_get_size (p->pages[i], axis);
1682 size = axis == H ? MAX (size, subsize) : size + subsize;
1685 return size;
 1688 int
 1689 render_pager_get_best_breakpoint (const struct render_pager *p, int height)
1691 int y = 0;
1692 size_t i;
1694 for (i = 0; i < p->n_pages; i++)
1696 int size = render_page_get_size (p->pages[i], V);
1697 if (y + size >= height)
1698 return render_page_get_best_breakpoint (p->pages[i], height - y) + y;
1699 y += size;
1702 return height;
1705 /* render_page_select() and helpers. */
1707 struct render_page_selection
1709 const struct render_page *page; /* Page whose slice we are selecting. */
1710 struct render_page *subpage; /* New page under construction. */
1711 enum table_axis a; /* Axis of 'page' along which 'subpage' is a slice. */
1712 enum table_axis b; /* The opposite of 'a'. */
1713 int z0; /* First cell along 'a' being selected. */
1714 int z1; /* Last cell being selected, plus 1. */
1715 int p0; /* Number of pixels to trim off left side of z0. */
1716 int p1; /* Number of pixels to trim off right side of z1-1. */
1719 static void cell_to_subpage (struct render_page_selection *,
1720 const struct table_cell *,
1721 int subcell[TABLE_N_AXES]);
1722 static const struct render_overflow *find_overflow_for_cell (
1723 struct render_page_selection *, const struct table_cell *);
1724 static struct render_overflow *insert_overflow (struct render_page_selection *,
1725 const struct table_cell *);
1727 /* Creates and returns a new render_page whose contents are a subregion of
1728 PAGE's contents. The new render_page includes cells Z0 through Z1
1729 (exclusive) along AXIS, plus any headers on AXIS.
1731 If P0 is nonzero, then it is a number of pixels to exclude from the left or
1732 top (according to AXIS) of cell Z0. Similarly, P1 is a number of pixels to
1733 exclude from the right or bottom of cell Z1 - 1. (P0 and P1 are used to
1734 render cells that are too large to fit on a single page.)
1736 The whole of axis !AXIS is included. (The caller may follow up with another
 1737 call to render_page_select() to select on !AXIS as well.)
1740 The caller retains ownership of PAGE, which is not modified. */
1741 static struct render_page *
1742 render_page_select (const struct render_page *page, enum table_axis axis,
1743 int z0, int p0, int z1, int p1)
1745 enum table_axis a = axis;
1746 enum table_axis b = !a;
1748 /* Optimize case where all of PAGE is selected by just incrementing the
1749 reference count. */
1750 if (z0 == page->h[a][0] && p0 == 0
1751 && z1 == page->n[a] - page->h[a][1] && p1 == 0)
1753 struct render_page *page_rw = CONST_CAST (struct render_page *, page);
1754 page_rw->ref_cnt++;
1755 return page_rw;
1758 /* Allocate subpage. */
1759 int trim[2] = { z0 - page->h[a][0], (page->n[a] - page->h[a][1]) - z1 };
1760 int n[TABLE_N_AXES] = { [H] = page->n[H], [V] = page->n[V] };
1761 n[a] -= trim[0] + trim[1];
1762 struct render_page *subpage = render_page_allocate__ (
1763 page->params, table_ref (page->table), n);
1764 for (enum table_axis k = 0; k < TABLE_N_AXES; k++)
1766 subpage->h[k][0] = page->h[k][0];
1767 subpage->h[k][1] = page->h[k][1];
1768 subpage->r[k][0] = page->r[k][0];
1769 subpage->r[k][1] = page->r[k][1];
1771 subpage->r[a][0] += trim[0];
1772 subpage->r[a][1] -= trim[1];
1774 /* An edge is cut off if it was cut off in PAGE or if we're trimming pixels
1775 off that side of the page and there are no headers. */
1776 subpage->is_edge_cutoff[a][0] =
1777 subpage->h[a][0] == 0 && (p0 || (z0 == 0 && page->is_edge_cutoff[a][0]));
1778 subpage->is_edge_cutoff[a][1] =
1779 subpage->h[a][1] == 0 && (p1 || (z1 == page->n[a]
1780 && page->is_edge_cutoff[a][1]));
1781 subpage->is_edge_cutoff[b][0] = page->is_edge_cutoff[b][0];
1782 subpage->is_edge_cutoff[b][1] = page->is_edge_cutoff[b][1];
1784 /* Select join crossings from PAGE into subpage. */
1785 int *jc = subpage->join_crossing[a];
1786 for (int z = 0; z < page->h[a][0]; z++)
1787 *jc++ = page->join_crossing[a][z];
1788 for (int z = z0; z <= z1; z++)
1789 *jc++ = page->join_crossing[a][z];
1790 for (int z = page->n[a] - page->h[a][1]; z < page->n[a]; z++)
1791 *jc++ = page->join_crossing[a][z];
1792 assert (jc == &subpage->join_crossing[a][subpage->n[a] + 1]);
1794 memcpy (subpage->join_crossing[b], page->join_crossing[b],
1795 (subpage->n[b] + 1) * sizeof **subpage->join_crossing);
1797 /* Select widths from PAGE into subpage. */
1798 int *scp = page->cp[a];
1799 int *dcp = subpage->cp[a];
1800 *dcp = 0;
1801 for (int z = 0; z <= rule_ofs (subpage->h[a][0]); z++, dcp++)
1803 int w = !z && subpage->is_edge_cutoff[a][0] ? 0 : scp[z + 1] - scp[z];
1804 dcp[1] = dcp[0] + w;
1806 for (int z = cell_ofs (z0); z <= cell_ofs (z1 - 1); z++, dcp++)
1808 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1809 if (z == cell_ofs (z0))
1811 dcp[1] -= p0;
1812 if (page->h[a][0] && page->h[a][1])
1813 dcp[1] += page->join_crossing[a][z / 2];
1815 if (z == cell_ofs (z1 - 1))
1816 dcp[1] -= p1;
1818 for (int z = rule_ofs_r (page, a, subpage->h[a][1]);
1819 z <= rule_ofs_r (page, a, 0); z++, dcp++)
1821 if (z == rule_ofs_r (page, a, 0) && subpage->is_edge_cutoff[a][1])
1822 dcp[1] = dcp[0];
1823 else
1824 dcp[1] = dcp[0] + (scp[z + 1] - scp[z]);
1826 assert (dcp == &subpage->cp[a][2 * subpage->n[a] + 1]);
1828 for (int z = 0; z < page->n[b] * 2 + 2; z++)
1829 subpage->cp[b][z] = page->cp[b][z];
1831 /* Add new overflows. */
1832 struct render_page_selection s = {
1833 .page = page,
1834 .a = a,
1835 .b = b,
1836 .z0 = z0,
1837 .z1 = z1,
1838 .p0 = p0,
1839 .p1 = p1,
1840 .subpage = subpage,
1843 if (!page->h[a][0] || z0 > page->h[a][0] || p0)
1844 for (int z = 0; z < page->n[b];)
1846 int d[TABLE_N_AXES];
1847 d[a] = z0;
1848 d[b] = z;
1850 struct table_cell cell;
1851 render_get_cell (page, d[H], d[V], &cell);
1852 bool overflow0 = p0 || cell.d[a][0] < z0;
1853 bool overflow1 = cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1);
1854 if (overflow0 || overflow1)
1856 struct render_overflow *ro = insert_overflow (&s, &cell);
1858 if (overflow0)
1860 ro->overflow[a][0] += p0 + axis_width (
1861 page, a, cell_ofs (cell.d[a][0]), cell_ofs (z0));
1862 if (page->h[a][0] && page->h[a][1])
1863 ro->overflow[a][0] -= page->join_crossing[a][cell.d[a][0]
1864 + 1];
1867 if (overflow1)
1869 ro->overflow[a][1] += p1 + axis_width (
1870 page, a, cell_ofs (z1), cell_ofs (cell.d[a][1]));
1871 if (page->h[a][0] && page->h[a][1])
1872 ro->overflow[a][1] -= page->join_crossing[a][cell.d[a][1]];
1875 z = cell.d[b][1];
1878 if (!page->h[a][1] || z1 < page->n[a] - page->h[a][1] || p1)
1879 for (int z = 0; z < page->n[b];)
1881 int d[TABLE_N_AXES];
1882 d[a] = z1 - 1;
1883 d[b] = z;
1885 struct table_cell cell;
1886 render_get_cell (page, d[H], d[V], &cell);
1887 if ((cell.d[a][1] > z1 || (cell.d[a][1] == z1 && p1))
1888 && find_overflow_for_cell (&s, &cell) == NULL)
1890 struct render_overflow *ro = insert_overflow (&s, &cell);
1891 ro->overflow[a][1] += p1 + axis_width (page, a, cell_ofs (z1),
1892 cell_ofs (cell.d[a][1]));
1894 z = cell.d[b][1];
1897 /* Copy overflows from PAGE into subpage. */
1898 struct render_overflow *ro;
1899 HMAP_FOR_EACH (ro, struct render_overflow, node, &page->overflows)
1901 struct table_cell cell;
1903 table_get_cell (page->table, ro->d[H], ro->d[V], &cell);
1904 if (cell.d[a][1] > z0 && cell.d[a][0] < z1
1905 && find_overflow_for_cell (&s, &cell) == NULL)
1906 insert_overflow (&s, &cell);
1909 return subpage;
1912 /* Given CELL, a table_cell within S->page, stores in SUBCELL the (x,y)
1913 coordinates of the top-left cell as it will appear in S->subpage.
1915 CELL must actually intersect the region of S->page that is being selected
1916 by render_page_select() or the results will not make any sense. */
1917 static void
1918 cell_to_subpage (struct render_page_selection *s,
1919 const struct table_cell *cell, int subcell[TABLE_N_AXES])
1921 enum table_axis a = s->a;
1922 enum table_axis b = s->b;
1923 int ha0 = s->subpage->h[a][0];
1925 subcell[a] = MAX (cell->d[a][0] - s->z0 + ha0, ha0);
1926 subcell[b] = cell->d[b][0];
1929 /* Given CELL, a table_cell within S->page, returns the render_overflow for
1930 that cell in S->subpage, if there is one, and a null pointer otherwise.
1932 CELL must actually intersect the region of S->page that is being selected
1933 by render_page_select() or the results will not make any sense. */
1934 static const struct render_overflow *
1935 find_overflow_for_cell (struct render_page_selection *s,
1936 const struct table_cell *cell)
1938 int subcell[2];
1940 cell_to_subpage (s, cell, subcell);
1941 return find_overflow (s->subpage, subcell[H], subcell[V]);
1944 /* Given CELL, a table_cell within S->page, inserts a render_overflow for that
1945 cell in S->subpage (which must not already exist). Initializes the new
1946 render_overflow's 'overflow' member from the overflow for CELL in S->page,
1947 if there is one.
1949 CELL must actually intersect the region of S->page that is being selected
1950 by render_page_select() or the results will not make any sense. */
1951 static struct render_overflow *
1952 insert_overflow (struct render_page_selection *s,
1953 const struct table_cell *cell)
1955 struct render_overflow *of = XZALLOC (struct render_overflow);
1956 cell_to_subpage (s, cell, of->d);
1957 hmap_insert (&s->subpage->overflows, &of->node,
1958 hash_cell (of->d[H], of->d[V]));
1960 const struct render_overflow *old
1961 = find_overflow (s->page, cell->d[H][0], cell->d[V][0]);
1962 if (old != NULL)
1963 memcpy (of->overflow, old->overflow, sizeof of->overflow);
1965 return of;