/*
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, K.U.Leuven, Departement
 * Computerwetenschappen, Celestijnenlaan 200A, B-3001 Leuven, Belgium
 */

#include "isl_dim.h"
#include "isl_seq.h"
#include "isl_mat.h"
#include "isl_map_private.h"
struct isl_mat *isl_mat_alloc(struct isl_ctx *ctx,
	unsigned n_row, unsigned n_col)
{
	int i;
	struct isl_mat *mat;

	mat = isl_alloc_type(ctx, struct isl_mat);
	if (!mat)
		return NULL;

	mat->row = NULL;
	mat->block = isl_blk_alloc(ctx, n_row * n_col);
	if (isl_blk_is_error(mat->block))
		goto error;
	mat->row = isl_alloc_array(ctx, isl_int *, n_row);
	if (!mat->row)
		goto error;

	for (i = 0; i < n_row; ++i)
		mat->row[i] = mat->block.data + i * n_col;

	mat->ctx = ctx;
	isl_ctx_ref(ctx);
	mat->ref = 1;
	mat->n_row = n_row;
	mat->n_col = n_col;
	mat->max_col = n_col;
	mat->flags = 0;

	return mat;
error:
	isl_blk_free(ctx, mat->block);
	free(mat);
	return NULL;
}
struct isl_mat *isl_mat_extend(struct isl_mat *mat,
	unsigned n_row, unsigned n_col)
{
	int i;
	isl_int *old;

	if (!mat)
		return NULL;

	if (mat->max_col >= n_col && mat->n_row >= n_row) {
		if (mat->n_col < n_col)
			mat->n_col = n_col;
		return mat;
	}

	if (mat->max_col < n_col) {
		struct isl_mat *new_mat;

		if (n_row < mat->n_row)
			n_row = mat->n_row;

		new_mat = isl_mat_alloc(mat->ctx, n_row, n_col);
		if (!new_mat)
			goto error;

		for (i = 0; i < mat->n_row; ++i)
			isl_seq_cpy(new_mat->row[i], mat->row[i], mat->n_col);

		isl_mat_free(mat);
		return new_mat;
	}

	mat = isl_mat_cow(mat);
	if (!mat)
		goto error;

	assert(mat->ref == 1);
	old = mat->block.data;
	mat->block = isl_blk_extend(mat->ctx, mat->block, n_row * mat->max_col);
	if (isl_blk_is_error(mat->block))
		goto error;
	mat->row = isl_realloc_array(mat->ctx, mat->row, isl_int *, n_row);
	if (!mat->row)
		goto error;

	for (i = 0; i < mat->n_row; ++i)
		mat->row[i] = mat->block.data + (mat->row[i] - old);
	for (i = mat->n_row; i < n_row; ++i)
		mat->row[i] = mat->block.data + i * mat->max_col;
	mat->n_row = n_row;
	if (mat->n_col < n_col)
		mat->n_col = n_col;

	return mat;
error:
	isl_mat_free(mat);
	return NULL;
}
struct isl_mat *isl_mat_sub_alloc(struct isl_ctx *ctx, isl_int **row,
	unsigned first_row, unsigned n_row, unsigned first_col, unsigned n_col)
{
	int i;
	struct isl_mat *mat;

	mat = isl_alloc_type(ctx, struct isl_mat);
	if (!mat)
		return NULL;
	mat->row = isl_alloc_array(ctx, isl_int *, n_row);
	if (!mat->row)
		goto error;
	for (i = 0; i < n_row; ++i)
		mat->row[i] = row[first_row+i] + first_col;
	mat->ctx = ctx;
	isl_ctx_ref(ctx);
	mat->ref = 1;
	mat->n_row = n_row;
	mat->n_col = n_col;
	mat->block = isl_blk_empty();
	mat->flags = ISL_MAT_BORROWED;
	return mat;
error:
	free(mat);
	return NULL;
}
void isl_mat_sub_copy(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
	unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
{
	int i;

	for (i = 0; i < n_row; ++i)
		isl_seq_cpy(dst[i]+dst_col, src[i]+src_col, n_col);
}
void isl_mat_sub_neg(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
	unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
{
	int i;

	for (i = 0; i < n_row; ++i)
		isl_seq_neg(dst[i]+dst_col, src[i]+src_col, n_col);
}
struct isl_mat *isl_mat_copy(struct isl_mat *mat)
{
	if (!mat)
		return NULL;

	mat->ref++;

	return mat;
}
struct isl_mat *isl_mat_dup(struct isl_mat *mat)
{
	int i;
	struct isl_mat *mat2;

	if (!mat)
		return NULL;
	mat2 = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
	if (!mat2)
		return NULL;
	for (i = 0; i < mat->n_row; ++i)
		isl_seq_cpy(mat2->row[i], mat->row[i], mat->n_col);
	return mat2;
}
struct isl_mat *isl_mat_cow(struct isl_mat *mat)
{
	struct isl_mat *mat2;

	if (!mat)
		return NULL;

	if (mat->ref == 1 && !ISL_F_ISSET(mat, ISL_MAT_BORROWED))
		return mat;

	mat2 = isl_mat_dup(mat);
	isl_mat_free(mat);
	return mat2;
}
void isl_mat_free(struct isl_mat *mat)
{
	if (!mat)
		return;

	if (--mat->ref > 0)
		return;

	if (!ISL_F_ISSET(mat, ISL_MAT_BORROWED))
		isl_blk_free(mat->ctx, mat->block);
	isl_ctx_deref(mat->ctx);
	free(mat->row);
	free(mat);
}
struct isl_mat *isl_mat_identity(struct isl_ctx *ctx, unsigned n_row)
{
	int i;
	struct isl_mat *mat;

	mat = isl_mat_alloc(ctx, n_row, n_row);
	if (!mat)
		return NULL;
	for (i = 0; i < n_row; ++i) {
		isl_seq_clr(mat->row[i], i);
		isl_int_set_si(mat->row[i][i], 1);
		isl_seq_clr(mat->row[i]+i+1, n_row-(i+1));
	}

	return mat;
}
struct isl_vec *isl_mat_vec_product(struct isl_mat *mat, struct isl_vec *vec)
{
	int i;
	struct isl_vec *prod;

	if (!mat || !vec)
		goto error;

	isl_assert(mat->ctx, mat->n_col == vec->size, goto error);

	prod = isl_vec_alloc(mat->ctx, mat->n_row);
	if (!prod)
		goto error;

	for (i = 0; i < prod->size; ++i)
		isl_seq_inner_product(mat->row[i], vec->el, vec->size,
					&prod->block.data[i]);
	isl_mat_free(mat);
	isl_vec_free(vec);
	return prod;
error:
	isl_mat_free(mat);
	isl_vec_free(vec);
	return NULL;
}
__isl_give isl_vec *isl_mat_vec_inverse_product(__isl_take isl_mat *mat,
	__isl_take isl_vec *vec)
{
	int i;
	struct isl_mat *vec_mat;

	if (!mat || !vec)
		goto error;
	vec_mat = isl_mat_alloc(vec->ctx, vec->size, 1);
	if (!vec_mat)
		goto error;
	for (i = 0; i < vec->size; ++i)
		isl_int_set(vec_mat->row[i][0], vec->el[i]);
	vec_mat = isl_mat_inverse_product(mat, vec_mat);
	isl_vec_free(vec);
	if (!vec_mat)
		return NULL;
	vec = isl_vec_alloc(vec_mat->ctx, vec_mat->n_row);
	if (vec)
		for (i = 0; i < vec->size; ++i)
			isl_int_set(vec->el[i], vec_mat->row[i][0]);
	isl_mat_free(vec_mat);
	return vec;
error:
	isl_mat_free(mat);
	isl_vec_free(vec);
	return NULL;
}
struct isl_vec *isl_vec_mat_product(struct isl_vec *vec, struct isl_mat *mat)
{
	int i, j;
	struct isl_vec *prod;

	if (!mat || !vec)
		goto error;

	isl_assert(mat->ctx, mat->n_row == vec->size, goto error);

	prod = isl_vec_alloc(mat->ctx, mat->n_col);
	if (!prod)
		goto error;

	for (i = 0; i < prod->size; ++i) {
		isl_int_set_si(prod->el[i], 0);
		for (j = 0; j < vec->size; ++j)
			isl_int_addmul(prod->el[i], vec->el[j], mat->row[j][i]);
	}
	isl_mat_free(mat);
	isl_vec_free(vec);
	return prod;
error:
	isl_mat_free(mat);
	isl_vec_free(vec);
	return NULL;
}
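/* The next function combines two affine transformations that share a
 * domain-splitting: it maps (x, y) to left(x) + right(y).  Row 0 of each
 * matrix holds the denominator of the transformation and the result uses
 * the lcm of the two denominators.  A small illustrative example (not from
 * the original source): for
 *
 *	left = [ 2 0 ]		right = [ 3 0 ]
 *	       [ 1 1 ]		        [ 2 3 ]
 *
 * i.e. x |-> (x + 1)/2 and y |-> (3 y + 2)/3, the result is
 *
 *	sum = [ 6 0 0 ]
 *	      [ 7 3 6 ]
 *
 * i.e. (x, y) |-> (3 x + 6 y + 7)/6.
 */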
struct isl_mat *isl_mat_aff_direct_sum(struct isl_mat *left,
	struct isl_mat *right)
{
	int i;
	struct isl_mat *sum;

	if (!left || !right)
		goto error;

	isl_assert(left->ctx, left->n_row == right->n_row, goto error);
	isl_assert(left->ctx, left->n_row >= 1, goto error);
	isl_assert(left->ctx, left->n_col >= 1, goto error);
	isl_assert(left->ctx, right->n_col >= 1, goto error);
	isl_assert(left->ctx,
	    isl_seq_first_non_zero(left->row[0]+1, left->n_col-1) == -1,
	    goto error);
	isl_assert(left->ctx,
	    isl_seq_first_non_zero(right->row[0]+1, right->n_col-1) == -1,
	    goto error);

	sum = isl_mat_alloc(left->ctx, left->n_row, left->n_col + right->n_col - 1);
	if (!sum)
		goto error;
	isl_int_lcm(sum->row[0][0], left->row[0][0], right->row[0][0]);
	isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
	isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);

	isl_seq_clr(sum->row[0]+1, sum->n_col-1);
	for (i = 1; i < sum->n_row; ++i) {
		isl_int_mul(sum->row[i][0], left->row[0][0], left->row[i][0]);
		isl_int_addmul(sum->row[i][0],
				right->row[0][0], right->row[i][0]);
		isl_seq_scale(sum->row[i]+1, left->row[i]+1, left->row[0][0],
				left->n_col-1);
		isl_seq_scale(sum->row[i]+left->n_col,
				right->row[i]+1, right->row[0][0],
				right->n_col-1);
	}

	isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
	isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);
	isl_mat_free(left);
	isl_mat_free(right);
	return sum;
error:
	isl_mat_free(left);
	isl_mat_free(right);
	return NULL;
}
/* Exchange columns "i" and "j" of "M" (in rows "row" and below) and
 * update the transformation matrices *U and *Q, if they are being computed.
 */
static void exchange(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned i, unsigned j)
{
	int r;

	for (r = row; r < M->n_row; ++r)
		isl_int_swap(M->row[r][i], M->row[r][j]);
	if (U) {
		for (r = 0; r < (*U)->n_row; ++r)
			isl_int_swap((*U)->row[r][i], (*U)->row[r][j]);
	}
	if (Q)
		isl_mat_swap_rows(*Q, i, j);
}
/* Subtract "m" times column "i" of "M" from column "j" (in rows "row"
 * and below) and update *U and *Q accordingly.
 */
static void subtract(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned i, unsigned j, isl_int m)
{
	int r;

	for (r = row; r < M->n_row; ++r)
		isl_int_submul(M->row[r][j], m, M->row[r][i]);
	if (U) {
		for (r = 0; r < (*U)->n_row; ++r)
			isl_int_submul((*U)->row[r][j], m, (*U)->row[r][i]);
	}
	if (Q) {
		for (r = 0; r < (*Q)->n_col; ++r)
			isl_int_addmul((*Q)->row[i][r], m, (*Q)->row[j][r]);
	}
}
/* Negate column "col" of "M" (in rows "row" and below) and update *U and *Q.
 */
static void oppose(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned col)
{
	int r;

	for (r = row; r < M->n_row; ++r)
		isl_int_neg(M->row[r][col], M->row[r][col]);
	if (U) {
		for (r = 0; r < (*U)->n_row; ++r)
			isl_int_neg((*U)->row[r][col], (*U)->row[r][col]);
	}
	if (Q)
		isl_seq_neg((*Q)->row[col], (*Q)->row[col], (*Q)->n_col);
}
/* Given matrix M, compute
 *
 *		M U = H
 *		M   = H Q
 *
 * with U and Q unimodular matrices and H a matrix in column echelon form
 * such that on each echelon row the entries in the non-echelon columns
 * are non-negative (if neg == 0) or non-positive (if neg == 1)
 * and strictly smaller (in absolute value) than the entries in the echelon
 * column.
 * If U or Q are NULL, then these matrices are not computed.
 */
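/* A small worked example (illustrative only): for
 *
 *	M = [ 2 3 ]
 *	    [ 1 4 ]
 *
 * the call isl_mat_left_hermite(M, 0, &U, &Q) computes
 *
 *	H = [ 1 0 ]	U = [ -1 -3 ]	Q = U^{-1} = [  2  3 ]
 *	    [ 3 5 ]	    [  1  2 ]	             [ -1 -1 ]
 *
 * so that M U = H and M = H Q, with 0 <= 3 < 5 on the second echelon row
 * (neg == 0).
 */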
struct isl_mat *isl_mat_left_hermite(struct isl_mat *M, int neg,
	struct isl_mat **U, struct isl_mat **Q)
{
	isl_int c;
	int row, col;

	if (U)
		*U = NULL;
	if (Q)
		*Q = NULL;
	if (!M)
		goto error;
	M = isl_mat_cow(M);
	if (!M)
		goto error;
	if (U) {
		*U = isl_mat_identity(M->ctx, M->n_col);
		if (!*U)
			goto error;
	}
	if (Q) {
		*Q = isl_mat_identity(M->ctx, M->n_col);
		if (!*Q)
			goto error;
	}

	col = 0;
	isl_int_init(c);
	for (row = 0; row < M->n_row; ++row) {
		int first, i, off;

		first = isl_seq_abs_min_non_zero(M->row[row]+col, M->n_col-col);
		if (first == -1)
			continue;
		first += col;
		if (first != col)
			exchange(M, U, Q, row, first, col);
		if (isl_int_is_neg(M->row[row][col]))
			oppose(M, U, Q, row, col);
		first = col+1;
		while ((off = isl_seq_first_non_zero(M->row[row]+first,
						       M->n_col-first)) != -1) {
			first += off;
			isl_int_fdiv_q(c, M->row[row][first], M->row[row][col]);
			subtract(M, U, Q, row, col, first, c);
			if (!isl_int_is_zero(M->row[row][first]))
				exchange(M, U, Q, row, first, col);
			else
				++first;
		}
		for (i = 0; i < col; ++i) {
			if (isl_int_is_zero(M->row[row][i]))
				continue;
			if (neg)
				isl_int_cdiv_q(c, M->row[row][i], M->row[row][col]);
			else
				isl_int_fdiv_q(c, M->row[row][i], M->row[row][col]);
			if (isl_int_is_zero(c))
				continue;
			subtract(M, U, Q, row, col, i, c);
		}
		++col;
	}
	isl_int_clear(c);

	return M;
error:
	if (Q) {
		isl_mat_free(*Q);
		*Q = NULL;
	}
	if (U) {
		isl_mat_free(*U);
		*U = NULL;
	}
	isl_mat_free(M);
	return NULL;
}
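/* The next function computes a basis for the null space (right kernel) of
 * "mat", i.e. a matrix K such that mat K = 0, from the trailing columns of
 * the unimodular factor U of the Hermite reduction.  Illustrative example
 * (not from the original source): for mat = [ 1 2 3 ] this yields
 *
 *	    [ -2 -3 ]
 *	K = [  1  0 ]
 *	    [  0  1 ]
 */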
struct isl_mat *isl_mat_right_kernel(struct isl_mat *mat)
{
	int i, rank;
	struct isl_mat *U = NULL;
	struct isl_mat *K;

	mat = isl_mat_left_hermite(mat, 0, &U, NULL);
	if (!mat || !U)
		goto error;

	for (i = 0, rank = 0; rank < mat->n_col; ++rank) {
		while (i < mat->n_row && isl_int_is_zero(mat->row[i][rank]))
			++i;
		if (i >= mat->n_row)
			break;
	}
	K = isl_mat_alloc(U->ctx, U->n_row, U->n_col - rank);
	if (!K)
		goto error;
	isl_mat_sub_copy(K->ctx, K->row, U->row, U->n_row, 0, rank, U->n_col-rank);
	isl_mat_free(mat);
	isl_mat_free(U);
	return K;
error:
	isl_mat_free(mat);
	isl_mat_free(U);
	return NULL;
}
/* Extend the linear transformation "mat" to an affine transformation
 * by adding an initial denominator row [1 0 ... 0] and a zero constant column.
 */
struct isl_mat *isl_mat_lin_to_aff(struct isl_mat *mat)
{
	int i;
	struct isl_mat *mat2;

	if (!mat)
		return NULL;
	mat2 = isl_mat_alloc(mat->ctx, 1+mat->n_row, 1+mat->n_col);
	if (!mat2)
		return NULL;
	isl_int_set_si(mat2->row[0][0], 1);
	isl_seq_clr(mat2->row[0]+1, mat->n_col);
	for (i = 0; i < mat->n_row; ++i) {
		isl_int_set_si(mat2->row[1+i][0], 0);
		isl_seq_cpy(mat2->row[1+i]+1, mat->row[i], mat->n_col);
	}
	isl_mat_free(mat);
	return mat2;
}
/* Given two matrices M1 and M2, return the block matrix
 *
 *	[ M1  0  ]
 *	[ 0   M2 ]
 */
__isl_give isl_mat *isl_mat_diagonal(__isl_take isl_mat *mat1,
	__isl_take isl_mat *mat2)
{
	int i;
	struct isl_mat *mat;

	if (!mat1 || !mat2)
		goto error;

	mat = isl_mat_alloc(mat1->ctx, mat1->n_row + mat2->n_row,
			    mat1->n_col + mat2->n_col);
	if (!mat)
		goto error;
	for (i = 0; i < mat1->n_row; ++i) {
		isl_seq_cpy(mat->row[i], mat1->row[i], mat1->n_col);
		isl_seq_clr(mat->row[i] + mat1->n_col, mat2->n_col);
	}
	for (i = 0; i < mat2->n_row; ++i) {
		isl_seq_clr(mat->row[mat1->n_row + i], mat1->n_col);
		isl_seq_cpy(mat->row[mat1->n_row + i] + mat1->n_col,
			    mat2->row[i], mat2->n_col);
	}
	isl_mat_free(mat1);
	isl_mat_free(mat2);
	return mat;
error:
	isl_mat_free(mat1);
	isl_mat_free(mat2);
	return NULL;
}
static int row_first_non_zero(isl_int **row, unsigned n_row, unsigned col)
{
	int i;

	for (i = 0; i < n_row; ++i)
		if (!isl_int_is_zero(row[i][col]))
			return i;
	return -1;
}
static int row_abs_min_non_zero(isl_int **row, unsigned n_row, unsigned col)
{
	int i, min = row_first_non_zero(row, n_row, col);

	if (min < 0)
		return -1;
	for (i = min + 1; i < n_row; ++i) {
		if (isl_int_is_zero(row[i][col]))
			continue;
		if (isl_int_abs_lt(row[i][col], row[min][col]))
			min = i;
	}
	return min;
}
static void inv_exchange(struct isl_mat *left, struct isl_mat *right,
	unsigned i, unsigned j)
{
	left = isl_mat_swap_rows(left, i, j);
	right = isl_mat_swap_rows(right, i, j);
}
static void inv_oppose(
	struct isl_mat *left, struct isl_mat *right, unsigned row)
{
	isl_seq_neg(left->row[row]+row, left->row[row]+row, left->n_col-row);
	isl_seq_neg(right->row[row], right->row[row], right->n_col);
}
/* Subtract "m" times row "row" from row "i" of both "left" and "right".
 */
static void inv_subtract(struct isl_mat *left, struct isl_mat *right,
	unsigned row, unsigned i, isl_int m)
{
	isl_int_neg(m, m);
	isl_seq_combine(left->row[i]+row,
			left->ctx->one, left->row[i]+row,
			m, left->row[row]+row,
			left->n_col-row);
	isl_seq_combine(right->row[i], right->ctx->one, right->row[i],
			m, right->row[row], right->n_col);
}
/* Compute inv(left) * right.
 */
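/* More precisely, the result is inv(left) * right scaled by a positive
 * integer (the lcm of the pivots), which is irrelevant for the affine
 * transformation matrices these routines typically operate on.
 * Illustrative example (not from the original source):
 *
 *	left = [ 2 1 ]	  right = [ 1 0 ]    gives    [  3 -1 ]
 *	       [ 1 3 ]	          [ 0 1 ]             [ -1  2 ]
 *
 * i.e. 5 * inv(left).
 */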
struct isl_mat *isl_mat_inverse_product(struct isl_mat *left,
	struct isl_mat *right)
{
	int row;
	isl_int a, b;

	if (!left || !right)
		goto error;

	isl_assert(left->ctx, left->n_row == left->n_col, goto error);
	isl_assert(left->ctx, left->n_row == right->n_row, goto error);

	if (left->n_row == 0) {
		isl_mat_free(left);
		return right;
	}

	left = isl_mat_cow(left);
	right = isl_mat_cow(right);
	if (!left || !right)
		goto error;

	isl_int_init(a);
	isl_int_init(b);

	for (row = 0; row < left->n_row; ++row) {
		int pivot, first, i, off;

		pivot = row_abs_min_non_zero(left->row+row, left->n_row-row, row);
		if (pivot < 0) {
			isl_int_clear(a);
			isl_int_clear(b);
			isl_assert(left->ctx, pivot >= 0, goto error);
		}
		pivot += row;
		if (pivot != row)
			inv_exchange(left, right, pivot, row);
		if (isl_int_is_neg(left->row[row][row]))
			inv_oppose(left, right, row);
		first = row+1;
		while ((off = row_first_non_zero(left->row+first,
					left->n_row-first, row)) != -1) {
			first += off;
			isl_int_fdiv_q(a, left->row[first][row],
					  left->row[row][row]);
			inv_subtract(left, right, row, first, a);
			if (!isl_int_is_zero(left->row[first][row]))
				inv_exchange(left, right, row, first);
			else
				++first;
		}
		for (i = 0; i < row; ++i) {
			if (isl_int_is_zero(left->row[i][row]))
				continue;
			isl_int_gcd(a, left->row[row][row], left->row[i][row]);
			isl_int_divexact(b, left->row[i][row], a);
			isl_int_divexact(a, left->row[row][row], a);
			isl_int_neg(b, b);
			isl_seq_combine(left->row[i] + i,
					a, left->row[i] + i,
					b, left->row[row] + i,
					left->n_col - i);
			isl_seq_combine(right->row[i], a, right->row[i],
					b, right->row[row], right->n_col);
		}
	}

	isl_int_set(a, left->row[0][0]);
	for (row = 1; row < left->n_row; ++row)
		isl_int_lcm(a, a, left->row[row][row]);
	if (isl_int_is_zero(a)) {
		isl_int_clear(a);
		isl_int_clear(b);
		isl_assert(left->ctx, 0, goto error);
	}
	for (row = 0; row < left->n_row; ++row) {
		isl_int_divexact(left->row[row][row], a, left->row[row][row]);
		if (isl_int_is_one(left->row[row][row]))
			continue;
		isl_seq_scale(right->row[row], right->row[row],
				left->row[row][row], right->n_col);
	}
	isl_int_clear(a);
	isl_int_clear(b);

	isl_mat_free(left);
	return right;
error:
	isl_mat_free(left);
	isl_mat_free(right);
	return NULL;
}
void isl_mat_col_scale(struct isl_mat *mat, unsigned col, isl_int m)
{
	int i;

	for (i = 0; i < mat->n_row; ++i)
		isl_int_mul(mat->row[i][col], mat->row[i][col], m);
}
void isl_mat_col_combine(struct isl_mat *mat, unsigned dst,
	isl_int m1, unsigned src1, isl_int m2, unsigned src2)
{
	int i;
	isl_int tmp;

	isl_int_init(tmp);
	for (i = 0; i < mat->n_row; ++i) {
		isl_int_mul(tmp, m1, mat->row[i][src1]);
		isl_int_addmul(tmp, m2, mat->row[i][src2]);
		isl_int_set(mat->row[i][dst], tmp);
	}
	isl_int_clear(tmp);
}
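/* The next function computes a right inverse of "mat" up to a scalar
 * factor: for a square "mat" the result "inv" satisfies mat * inv = d * I
 * for some positive integer d.  Illustrative example (not from the original
 * source):
 *
 *	mat = [ 2 1 ]	yields	inv = [  3 -1 ]		so mat inv = 5 I.
 *	      [ 1 3 ]		      [ -1  2 ]
 */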
struct isl_mat *isl_mat_right_inverse(struct isl_mat *mat)
{
	struct isl_mat *inv;
	int row;
	isl_int a, b;

	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;

	inv = isl_mat_identity(mat->ctx, mat->n_col);
	inv = isl_mat_cow(inv);
	if (!inv)
		goto error;

	isl_int_init(a);
	isl_int_init(b);
	for (row = 0; row < mat->n_row; ++row) {
		int pivot, first, i, off;

		pivot = isl_seq_abs_min_non_zero(mat->row[row]+row, mat->n_col-row);
		if (pivot < 0) {
			isl_int_clear(a);
			isl_int_clear(b);
			isl_assert(mat->ctx, pivot >= 0, goto error);
		}
		pivot += row;
		if (pivot != row)
			exchange(mat, &inv, NULL, row, pivot, row);
		if (isl_int_is_neg(mat->row[row][row]))
			oppose(mat, &inv, NULL, row, row);
		first = row+1;
		while ((off = isl_seq_first_non_zero(mat->row[row]+first,
						    mat->n_col-first)) != -1) {
			first += off;
			isl_int_fdiv_q(a, mat->row[row][first],
						    mat->row[row][row]);
			subtract(mat, &inv, NULL, row, row, first, a);
			if (!isl_int_is_zero(mat->row[row][first]))
				exchange(mat, &inv, NULL, row, row, first);
			else
				++first;
		}
		for (i = 0; i < row; ++i) {
			if (isl_int_is_zero(mat->row[row][i]))
				continue;
			isl_int_gcd(a, mat->row[row][row], mat->row[row][i]);
			isl_int_divexact(b, mat->row[row][i], a);
			isl_int_divexact(a, mat->row[row][row], a);
			isl_int_neg(b, b);
			isl_mat_col_combine(mat, i, a, i, b, row);
			isl_mat_col_combine(inv, i, a, i, b, row);
		}
	}

	isl_int_set(a, mat->row[0][0]);
	for (row = 1; row < mat->n_row; ++row)
		isl_int_lcm(a, a, mat->row[row][row]);
	if (isl_int_is_zero(a)) {
		isl_int_clear(a);
		isl_int_clear(b);
		goto error;
	}
	for (row = 0; row < mat->n_row; ++row) {
		isl_int_divexact(mat->row[row][row], a, mat->row[row][row]);
		if (isl_int_is_one(mat->row[row][row]))
			continue;
		isl_mat_col_scale(inv, row, mat->row[row][row]);
	}
	isl_int_clear(a);
	isl_int_clear(b);

	isl_mat_free(mat);
	return inv;
error:
	isl_mat_free(mat);
	isl_mat_free(inv);
	return NULL;
}
struct isl_mat *isl_mat_transpose(struct isl_mat *mat)
{
	struct isl_mat *transpose = NULL;
	int i, j;

	if (mat->n_col == mat->n_row) {
		mat = isl_mat_cow(mat);
		if (!mat)
			return NULL;
		for (i = 0; i < mat->n_row; ++i)
			for (j = i + 1; j < mat->n_col; ++j)
				isl_int_swap(mat->row[i][j], mat->row[j][i]);
		return mat;
	}

	transpose = isl_mat_alloc(mat->ctx, mat->n_col, mat->n_row);
	if (!transpose)
		goto error;
	for (i = 0; i < mat->n_row; ++i)
		for (j = 0; j < mat->n_col; ++j)
			isl_int_set(transpose->row[j][i], mat->row[i][j]);
	isl_mat_free(mat);
	return transpose;
error:
	isl_mat_free(mat);
	return NULL;
}
struct isl_mat *isl_mat_swap_cols(struct isl_mat *mat, unsigned i, unsigned j)
{
	int r;

	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;
	isl_assert(mat->ctx, i < mat->n_col, goto error);
	isl_assert(mat->ctx, j < mat->n_col, goto error);

	for (r = 0; r < mat->n_row; ++r)
		isl_int_swap(mat->row[r][i], mat->row[r][j]);
	return mat;
error:
	isl_mat_free(mat);
	return NULL;
}
struct isl_mat *isl_mat_swap_rows(struct isl_mat *mat, unsigned i, unsigned j)
{
	isl_int *t;

	if (!mat)
		return NULL;
	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;
	t = mat->row[i];
	mat->row[i] = mat->row[j];
	mat->row[j] = t;
	return mat;
}
struct isl_mat *isl_mat_product(struct isl_mat *left, struct isl_mat *right)
{
	int i, j, k;
	struct isl_mat *prod;

	if (!left || !right)
		goto error;
	isl_assert(left->ctx, left->n_col == right->n_row, goto error);
	prod = isl_mat_alloc(left->ctx, left->n_row, right->n_col);
	if (!prod)
		goto error;
	if (left->n_col == 0) {
		for (i = 0; i < prod->n_row; ++i)
			isl_seq_clr(prod->row[i], prod->n_col);
		isl_mat_free(left);
		isl_mat_free(right);
		return prod;
	}
	for (i = 0; i < prod->n_row; ++i) {
		for (j = 0; j < prod->n_col; ++j) {
			isl_int_mul(prod->row[i][j],
				    left->row[i][0], right->row[0][j]);
			for (k = 1; k < left->n_col; ++k)
				isl_int_addmul(prod->row[i][j],
						left->row[i][k], right->row[k][j]);
		}
	}
	isl_mat_free(left);
	isl_mat_free(right);
	return prod;
error:
	isl_mat_free(left);
	isl_mat_free(right);
	return NULL;
}
/* Replace the variables x in the rows q by x' given by x = M x',
 * with M the matrix mat.
 *
 * If the number of new variables is greater than the original
 * number of variables, then the rows q have already been
 * preextended.  If the new number is smaller, then the coefficients
 * of the divs, which are not changed, need to be shifted down.
 * The row q may be the equalities, the inequalities or the
 * div expressions.  In the latter case, has_div is true and
 * we need to take into account the extra denominator column.
 */
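/* Illustrative example (not from the original source): with one new
 * variable x0' and the substitution x0 = x0' + 1, x1 = 2 x0', i.e.
 *
 *	    [ 1 0 ]
 *	M = [ 1 1 ]
 *	    [ 0 2 ]
 *
 * the constraint row q = [ -3 1 2 ] (-3 + x0 + 2 x1 >= 0) becomes
 * q M = [ -2 5 ], i.e. -2 + 5 x0' >= 0.
 */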
static int preimage(struct isl_ctx *ctx, isl_int **q, unsigned n,
	unsigned n_div, int has_div, struct isl_mat *mat)
{
	int i;
	struct isl_mat *t;
	int e;

	if (mat->n_col >= mat->n_row)
		e = 0;
	else
		e = mat->n_row - mat->n_col;
	if (has_div)
		for (i = 0; i < n; ++i)
			isl_int_mul(q[i][0], q[i][0], mat->row[0][0]);
	t = isl_mat_sub_alloc(mat->ctx, q, 0, n, has_div, mat->n_row);
	t = isl_mat_product(t, mat);
	if (!t)
		return -1;
	for (i = 0; i < n; ++i) {
		isl_seq_swp_or_cpy(q[i] + has_div, t->row[i], t->n_col);
		isl_seq_cpy(q[i] + has_div + t->n_col,
			    q[i] + has_div + t->n_col + e, n_div);
		isl_seq_clr(q[i] + has_div + t->n_col + n_div, e);
	}
	isl_mat_free(t);
	return 0;
}
/* Replace the variables x in bset by x' given by x = M x', with
 * M the matrix mat.
 *
 * If there are fewer variables x' than there are x, then we perform
 * the transformation in place, which means that, in principle,
 * this frees up some extra variables as the number
 * of columns remains constant, but we would have to extend
 * the div array too as the number of rows in this array is assumed
 * to be equal to extra.
 */
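/* For instance (illustrative only), if bset is { [x] : 0 <= x <= 5 } and
 *
 *	M = [ 1 0 ]
 *	    [ 0 2 ]
 *
 * (i.e., x = 2 x'), then the result describes { [x'] : 0 <= 2 x' <= 5 }.
 */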
struct isl_basic_set *isl_basic_set_preimage(struct isl_basic_set *bset,
	struct isl_mat *mat)
{
	struct isl_ctx *ctx;

	if (!bset || !mat)
		goto error;

	ctx = bset->ctx;
	bset = isl_basic_set_cow(bset);
	if (!bset)
		goto error;

	isl_assert(ctx, bset->dim->nparam == 0, goto error);
	isl_assert(ctx, 1+bset->dim->n_out == mat->n_row, goto error);
	isl_assert(ctx, mat->n_col > 0, goto error);

	if (mat->n_col > mat->n_row)
		bset = isl_basic_set_extend(bset, 0, mat->n_col-1, 0,
						0, 0);
	else if (mat->n_col < mat->n_row) {
		bset->dim = isl_dim_cow(bset->dim);
		if (!bset->dim)
			goto error;
		bset->dim->n_out -= mat->n_row - mat->n_col;
	}

	if (preimage(ctx, bset->eq, bset->n_eq, bset->n_div, 0,
			isl_mat_copy(mat)) < 0)
		goto error;

	if (preimage(ctx, bset->ineq, bset->n_ineq, bset->n_div, 0,
			isl_mat_copy(mat)) < 0)
		goto error;

	if (preimage(ctx, bset->div, bset->n_div, bset->n_div, 1, mat) < 0)
		goto error2;

	ISL_F_CLR(bset, ISL_BASIC_SET_NO_IMPLICIT);
	ISL_F_CLR(bset, ISL_BASIC_SET_NO_REDUNDANT);
	ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED);
	ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED_DIVS);
	ISL_F_CLR(bset, ISL_BASIC_SET_ALL_EQUALITIES);

	bset = isl_basic_set_simplify(bset);
	bset = isl_basic_set_finalize(bset);

	return bset;
error:
	isl_mat_free(mat);
error2:
	isl_basic_set_free(bset);
	return NULL;
}
struct isl_set *isl_set_preimage(struct isl_set *set, struct isl_mat *mat)
{
	struct isl_ctx *ctx;
	int i;

	set = isl_set_cow(set);
	if (!set)
		goto error;

	ctx = set->ctx;
	for (i = 0; i < set->n; ++i) {
		set->p[i] = isl_basic_set_preimage(set->p[i],
						    isl_mat_copy(mat));
		if (!set->p[i])
			goto error;
	}
	if (mat->n_col != mat->n_row) {
		set->dim = isl_dim_cow(set->dim);
		if (!set->dim)
			goto error;
		set->dim->n_out += mat->n_col;
		set->dim->n_out -= mat->n_row;
	}
	ISL_F_CLR(set, ISL_SET_NORMALIZED);
	isl_mat_free(mat);
	return set;
error:
	isl_set_free(set);
	isl_mat_free(mat);
	return NULL;
}
void isl_mat_dump(struct isl_mat *mat, FILE *out, int indent)
{
	int i, j;

	if (!mat) {
		fprintf(out, "%*snull mat\n", indent, "");
		return;
	}

	if (mat->n_row == 0)
		fprintf(out, "%*s[]\n", indent, "");

	for (i = 0; i < mat->n_row; ++i) {
		if (!i)
			fprintf(out, "%*s[[", indent, "");
		else
			fprintf(out, "%*s[", indent+1, "");
		for (j = 0; j < mat->n_col; ++j) {
			if (j)
				fprintf(out, ",");
			isl_int_print(out, mat->row[i][j], 0);
		}
		if (i == mat->n_row-1)
			fprintf(out, "]]\n");
		else
			fprintf(out, "]\n");
	}
}
struct isl_mat *isl_mat_drop_cols(struct isl_mat *mat, unsigned col, unsigned n)
{
	int r;

	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;

	if (col != mat->n_col-n) {
		for (r = 0; r < mat->n_row; ++r)
			isl_seq_cpy(mat->row[r]+col, mat->row[r]+col+n,
					mat->n_col - col - n);
	}
	mat->n_col -= n;
	return mat;
}
struct isl_mat *isl_mat_drop_rows(struct isl_mat *mat, unsigned row, unsigned n)
{
	int r;

	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;

	for (r = row; r+n < mat->n_row; ++r)
		mat->row[r] = mat->row[r+n];

	mat->n_row -= n;
	return mat;
}
__isl_give isl_mat *isl_mat_insert_cols(__isl_take isl_mat *mat,
		unsigned col, unsigned n)
{
	struct isl_mat *ext;

	if (!mat)
		return NULL;
	if (n == 0)
		return mat;

	ext = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col + n);
	if (!ext)
		goto error;

	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row, 0, 0, col);
	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row,
				col + n, col, mat->n_col - col);

	isl_mat_free(mat);
	return ext;
error:
	isl_mat_free(mat);
	return NULL;
}
__isl_give isl_mat *isl_mat_insert_rows(__isl_take isl_mat *mat,
		unsigned row, unsigned n)
{
	struct isl_mat *ext;

	if (!mat)
		return NULL;
	if (n == 0)
		return mat;

	ext = isl_mat_alloc(mat->ctx, mat->n_row + n, mat->n_col);
	if (!ext)
		goto error;

	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, row, 0, 0, mat->n_col);
	isl_mat_sub_copy(mat->ctx, ext->row + row + n, mat->row + row,
				mat->n_row - row, 0, 0, mat->n_col);

	isl_mat_free(mat);
	return ext;
error:
	isl_mat_free(mat);
	return NULL;
}
void isl_mat_col_submul(struct isl_mat *mat,
			int dst_col, isl_int f, int src_col)
{
	int i;

	for (i = 0; i < mat->n_row; ++i)
		isl_int_submul(mat->row[i][dst_col], f, mat->row[i][src_col]);
}
void isl_mat_col_mul(struct isl_mat *mat, int dst_col, isl_int f, int src_col)
{
	int i;

	for (i = 0; i < mat->n_row; ++i)
		isl_int_mul(mat->row[i][dst_col], f, mat->row[i][src_col]);
}
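/* The next function completes the first "row" rows of the square matrix "M"
 * to a unimodular matrix, taking the remaining rows from the unimodular Q
 * factor of their Hermite form.  Illustrative example (not from the original
 * source): a first row [ 2 3 ] can be completed to
 *
 *	M = [ 2 3 ]
 *	    [ 1 1 ]
 *
 * which has determinant -1.
 */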
struct isl_mat *isl_mat_unimodular_complete(struct isl_mat *M, int row)
{
	int r;
	struct isl_mat *H = NULL, *Q = NULL;

	if (!M)
		return NULL;

	isl_assert(M->ctx, M->n_row == M->n_col, goto error);
	M->n_row = row;
	H = isl_mat_left_hermite(isl_mat_copy(M), 0, NULL, &Q);
	M->n_row = M->n_col;
	if (!H)
		goto error;
	for (r = 0; r < row; ++r)
		isl_assert(M->ctx, isl_int_is_one(H->row[r][r]), goto error);
	for (r = row; r < M->n_row; ++r)
		isl_seq_cpy(M->row[r], Q->row[r], M->n_col);
	isl_mat_free(H);
	isl_mat_free(Q);
	return M;
error:
	isl_mat_free(H);
	isl_mat_free(Q);
	isl_mat_free(M);
	return NULL;
}
__isl_give isl_mat *isl_mat_concat(__isl_take isl_mat *top,
	__isl_take isl_mat *bot)
{
	struct isl_mat *mat;

	if (!top || !bot)
		goto error;

	isl_assert(top->ctx, top->n_col == bot->n_col, goto error);
	if (top->n_row == 0) {
		isl_mat_free(top);
		return bot;
	}
	if (bot->n_row == 0) {
		isl_mat_free(bot);
		return top;
	}

	mat = isl_mat_alloc(top->ctx, top->n_row + bot->n_row, top->n_col);
	if (!mat)
		goto error;
	isl_mat_sub_copy(mat->ctx, mat->row, top->row, top->n_row,
			 0, 0, mat->n_col);
	isl_mat_sub_copy(mat->ctx, mat->row + top->n_row, bot->row, bot->n_row,
			 0, 0, mat->n_col);
	isl_mat_free(top);
	isl_mat_free(bot);
	return mat;
error:
	isl_mat_free(top);
	isl_mat_free(bot);
	return NULL;
}
int isl_mat_is_equal(__isl_keep isl_mat *mat1, __isl_keep isl_mat *mat2)
{
	int i;

	if (!mat1 || !mat2)
		return -1;

	if (mat1->n_row != mat2->n_row)
		return 0;

	if (mat1->n_col != mat2->n_col)
		return 0;

	for (i = 0; i < mat1->n_row; ++i)
		if (!isl_seq_eq(mat1->row[i], mat2->row[i], mat1->n_col))
			return 0;

	return 1;
}
__isl_give isl_mat *isl_mat_from_row_vec(__isl_take isl_vec *vec)
{
	struct isl_mat *mat;

	if (!vec)
		return NULL;
	mat = isl_mat_alloc(vec->ctx, 1, vec->size);
	if (!mat)
		goto error;

	isl_seq_cpy(mat->row[0], vec->el, vec->size);

	isl_vec_free(vec);
	return mat;
error:
	isl_vec_free(vec);
	return NULL;
}
__isl_give isl_mat *isl_mat_vec_concat(__isl_take isl_mat *top,
	__isl_take isl_vec *bot)
{
	return isl_mat_concat(top, isl_mat_from_row_vec(bot));
}
__isl_give isl_mat *isl_mat_move_cols(__isl_take isl_mat *mat,
	unsigned dst_col, unsigned src_col, unsigned n)
{
	struct isl_mat *res;

	if (!mat)
		return NULL;
	if (n == 0 || dst_col == src_col)
		return mat;

	res = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
	if (!res)
		goto error;

	if (dst_col < src_col) {
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 0, 0, dst_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col, src_col, n);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col + n, dst_col, src_col - dst_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 src_col + n, src_col + n,
				 res->n_col - src_col - n);
	} else {
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 0, 0, src_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 src_col, src_col + n, dst_col - src_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col, src_col, n);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col + n, dst_col + n,
				 res->n_col - dst_col - n);
	}
	isl_mat_free(mat);

	return res;
error:
	isl_mat_free(mat);
	return NULL;
}
void isl_mat_gcd(__isl_keep isl_mat *mat, isl_int *gcd)
{
	int i;
	isl_int g;

	isl_int_set_si(*gcd, 0);
	if (!mat)
		return;

	isl_int_init(g);
	for (i = 0; i < mat->n_row; ++i) {
		isl_seq_gcd(mat->row[i], mat->n_col, &g);
		isl_int_gcd(*gcd, *gcd, g);
	}
	isl_int_clear(g);
}
__isl_give isl_mat *isl_mat_scale_down(__isl_take isl_mat *mat, isl_int m)
{
	int i;

	if (isl_int_is_one(m))
		return mat;

	mat = isl_mat_cow(mat);
	if (!mat)
		return NULL;

	for (i = 0; i < mat->n_row; ++i)
		isl_seq_scale_down(mat->row[i], mat->row[i], m, mat->n_col);

	return mat;
}
__isl_give isl_mat *isl_mat_normalize(__isl_take isl_mat *mat)
{
	isl_int gcd;

	if (!mat)
		return NULL;

	isl_int_init(gcd);
	isl_mat_gcd(mat, &gcd);
	mat = isl_mat_scale_down(mat, gcd);
	isl_int_clear(gcd);

	return mat;
}