Little fix after the last commit (mostly a git fail)
[eigenmath-fx.git] / tensor.cpp
blob4c6ebcc61f4d638be14bb441428abc2c304cbc9f
1 #include "stdafx.h"
3 #include "defs.h"
5 static void promote_tensor(void);
6 static int compatible(U *, U *);
8 //-----------------------------------------------------------------------------
9 //
10 // Called from the "eval" module to evaluate tensor elements.
12 // p1 points to the tensor operand.
14 //-----------------------------------------------------------------------------
16 void
17 eval_tensor(void)
19 int i, ndim, nelem;
20 U **a, **b;
22 //---------------------------------------------------------------------
24 // create a new tensor for the result
26 //---------------------------------------------------------------------
28 nelem = p1->u.tensor->nelem;
30 ndim = p1->u.tensor->ndim;
32 p2 = alloc_tensor(nelem);
34 p2->u.tensor->ndim = ndim;
36 for (i = 0; i < ndim; i++)
37 p2->u.tensor->dim[i] = p1->u.tensor->dim[i];
39 //---------------------------------------------------------------------
41 // b = eval(a)
43 //---------------------------------------------------------------------
45 a = p1->u.tensor->elem;
46 b = p2->u.tensor->elem;
48 for (i = 0; i < nelem; i++) {
49 push(a[i]);
50 eval();
51 b[i] = pop();
54 //---------------------------------------------------------------------
56 // push the result
58 //---------------------------------------------------------------------
60 push(p2);
62 promote_tensor();
65 //-----------------------------------------------------------------------------
67 // Add tensors
69 // Input: Operands on stack
71 // Output: Result on stack
73 //-----------------------------------------------------------------------------
75 void
76 tensor_plus_tensor(void)
78 int i, ndim, nelem;
79 U **a, **b, **c;
81 save();
83 p2 = pop();
84 p1 = pop();
86 // are the dimension lists equal?
88 ndim = p1->u.tensor->ndim;
90 if (ndim != p2->u.tensor->ndim) {
91 push(symbol(NIL));
92 restore();
93 return;
96 for (i = 0; i < ndim; i++)
97 if (p1->u.tensor->dim[i] != p2->u.tensor->dim[i]) {
98 push(symbol(NIL));
99 restore();
100 return;
103 // create a new tensor for the result
105 nelem = p1->u.tensor->nelem;
107 p3 = alloc_tensor(nelem);
109 p3->u.tensor->ndim = ndim;
111 for (i = 0; i < ndim; i++)
112 p3->u.tensor->dim[i] = p1->u.tensor->dim[i];
114 // c = a + b
116 a = p1->u.tensor->elem;
117 b = p2->u.tensor->elem;
118 c = p3->u.tensor->elem;
120 for (i = 0; i < nelem; i++) {
121 push(a[i]);
122 push(b[i]);
123 add();
124 c[i] = pop();
127 // push the result
129 push(p3);
131 restore();
134 //-----------------------------------------------------------------------------
136 // careful not to reorder factors
138 //-----------------------------------------------------------------------------
140 void
141 tensor_times_scalar(void)
143 int i, ndim, nelem;
144 U **a, **b;
146 save();
148 p2 = pop();
149 p1 = pop();
151 ndim = p1->u.tensor->ndim;
152 nelem = p1->u.tensor->nelem;
154 p3 = alloc_tensor(nelem);
156 p3->u.tensor->ndim = ndim;
158 for (i = 0; i < ndim; i++)
159 p3->u.tensor->dim[i] = p1->u.tensor->dim[i];
161 a = p1->u.tensor->elem;
162 b = p3->u.tensor->elem;
164 for (i = 0; i < nelem; i++) {
165 push(a[i]);
166 push(p2);
167 multiply();
168 b[i] = pop();
171 push(p3);
172 restore();
175 void
176 scalar_times_tensor(void)
178 int i, ndim, nelem;
179 U **a, **b;
181 save();
183 p2 = pop();
184 p1 = pop();
186 ndim = p2->u.tensor->ndim;
187 nelem = p2->u.tensor->nelem;
189 p3 = alloc_tensor(nelem);
191 p3->u.tensor->ndim = ndim;
193 for (i = 0; i < ndim; i++)
194 p3->u.tensor->dim[i] = p2->u.tensor->dim[i];
196 a = p2->u.tensor->elem;
197 b = p3->u.tensor->elem;
199 for (i = 0; i < nelem; i++) {
200 push(p1);
201 push(a[i]);
202 multiply();
203 b[i] = pop();
206 push(p3);
208 restore();
212 is_square_matrix(U *p)
214 if (istensor(p) && p->u.tensor->ndim == 2 && p->u.tensor->dim[0] == p->u.tensor->dim[1])
215 return 1;
216 else
217 return 0;
220 //-----------------------------------------------------------------------------
222 // gradient of tensor
224 //-----------------------------------------------------------------------------
226 void
227 d_tensor_tensor(void)
229 int i, j, ndim, nelem;
230 U **a, **b, **c;
232 ndim = p1->u.tensor->ndim;
233 nelem = p1->u.tensor->nelem;
235 if (ndim + 1 >= MAXDIM)
236 goto dont_evaluate;
238 p3 = alloc_tensor(nelem * p2->u.tensor->nelem);
240 p3->u.tensor->ndim = ndim + 1;
242 for (i = 0; i < ndim; i++)
243 p3->u.tensor->dim[i] = p1->u.tensor->dim[i];
245 p3->u.tensor->dim[ndim] = p2->u.tensor->dim[0];
247 a = p1->u.tensor->elem;
248 b = p2->u.tensor->elem;
249 c = p3->u.tensor->elem;
251 for (i = 0; i < nelem; i++) {
252 for (j = 0; j < p2->u.tensor->nelem; j++) {
253 push(a[i]);
254 push(b[j]);
255 derivative();
256 c[i * p2->u.tensor->nelem + j] = pop();
260 push(p3);
262 return;
264 dont_evaluate:
266 push_symbol(DERIVATIVE);
267 push(p1);
268 push(p2);
269 list(3);
272 //-----------------------------------------------------------------------------
274 // gradient of scalar
276 //-----------------------------------------------------------------------------
278 void
279 d_scalar_tensor(void)
281 int i;
282 U **a, **b;
284 p3 = alloc_tensor(p2->u.tensor->nelem);
286 p3->u.tensor->ndim = 1;
288 p3->u.tensor->dim[0] = p2->u.tensor->dim[0];
290 a = p2->u.tensor->elem;
291 b = p3->u.tensor->elem;
293 for (i = 0; i < p2->u.tensor->nelem; i++) {
294 push(p1);
295 push(a[i]);
296 derivative();
297 b[i] = pop();
300 push(p3);
303 //-----------------------------------------------------------------------------
305 // Derivative of tensor
307 //-----------------------------------------------------------------------------
309 void
310 d_tensor_scalar(void)
312 int i;
313 U **a, **b;
315 p3 = alloc_tensor(p1->u.tensor->nelem);
317 p3->u.tensor->ndim = p1->u.tensor->ndim;
319 for (i = 0; i < p1->u.tensor->ndim; i++)
320 p3->u.tensor->dim[i] = p1->u.tensor->dim[i];
322 a = p1->u.tensor->elem;
323 b = p3->u.tensor->elem;
325 for (i = 0; i < p1->u.tensor->nelem; i++) {
326 push(a[i]);
327 push(p2);
328 derivative();
329 b[i] = pop();
332 push(p3);
336 compare_tensors(U *p1, U *p2)
338 int i;
340 if (p1->u.tensor->ndim < p2->u.tensor->ndim)
341 return -1;
343 if (p1->u.tensor->ndim > p2->u.tensor->ndim)
344 return 1;
346 for (i = 0; i < p1->u.tensor->ndim; i++) {
347 if (p1->u.tensor->dim[i] < p2->u.tensor->dim[i])
348 return -1;
349 if (p1->u.tensor->dim[i] > p2->u.tensor->dim[i])
350 return 1;
353 for (i = 0; i < p1->u.tensor->nelem; i++) {
354 if (equal(p1->u.tensor->elem[i], p2->u.tensor->elem[i]))
355 continue;
356 if (lessp(p1->u.tensor->elem[i], p2->u.tensor->elem[i]))
357 return -1;
358 else
359 return 1;
362 return 0;
365 //-----------------------------------------------------------------------------
367 // Raise a tensor to a power
369 // Input: p1 tensor
371 // p2 exponent
373 // Output: Result on stack
375 //-----------------------------------------------------------------------------
377 void
378 power_tensor(void)
380 int i, k, n;
382 // first and last dims must be equal
384 k = p1->u.tensor->ndim - 1;
386 if (p1->u.tensor->dim[0] != p1->u.tensor->dim[k]) {
387 push_symbol(POWER);
388 push(p1);
389 push(p2);
390 list(3);
391 return;
394 push(p2);
396 n = pop_integer();
398 if (n == (int) 0x80000000) {
399 push_symbol(POWER);
400 push(p1);
401 push(p2);
402 list(3);
403 return;
406 if (n == 0) {
407 if (p1->u.tensor->ndim != 2)
408 stop("power(tensor,0) with tensor rank not equal to 2");
409 n = p1->u.tensor->dim[0];
410 p1 = alloc_tensor(n * n);
411 p1->u.tensor->ndim = 2;
412 p1->u.tensor->dim[0] = n;
413 p1->u.tensor->dim[1] = n;
414 for (i = 0; i < n; i++)
415 p1->u.tensor->elem[n * i + i] = one;
416 push(p1);
417 return;
420 if (n < 0) {
421 n = -n;
422 push(p1);
423 inv();
424 p1 = pop();
427 push(p1);
429 for (i = 1; i < n; i++) {
430 push(p1);
431 inner();
432 if (iszero(stack[tos - 1]))
433 break;
437 void
438 copy_tensor(void)
440 int i;
442 save();
444 p1 = pop();
446 p2 = alloc_tensor(p1->u.tensor->nelem);
448 p2->u.tensor->ndim = p1->u.tensor->ndim;
450 for (i = 0; i < p1->u.tensor->ndim; i++)
451 p2->u.tensor->dim[i] = p1->u.tensor->dim[i];
453 for (i = 0; i < p1->u.tensor->nelem; i++)
454 p2->u.tensor->elem[i] = p1->u.tensor->elem[i];
456 push(p2);
458 restore();
461 // Tensors with elements that are also tensors get promoted to a higher rank.
463 static void
464 promote_tensor(void)
466 int i, j, k, nelem, ndim;
468 save();
470 p1 = pop();
472 if (!istensor(p1)) {
473 push(p1);
474 restore();
475 return;
478 p2 = p1->u.tensor->elem[0];
480 for (i = 1; i < p1->u.tensor->nelem; i++)
481 if (!compatible(p2, p1->u.tensor->elem[i]))
482 stop("Cannot promote tensor due to inconsistent tensor components.");
484 if (!istensor(p2)) {
485 push(p1);
486 restore();
487 return;
490 ndim = p1->u.tensor->ndim + p2->u.tensor->ndim;
492 if (ndim > MAXDIM)
493 stop("tensor rank > 24");
495 nelem = p1->u.tensor->nelem * p2->u.tensor->nelem;
497 p3 = alloc_tensor(nelem);
499 p3->u.tensor->ndim = ndim;
501 for (i = 0; i < p1->u.tensor->ndim; i++)
502 p3->u.tensor->dim[i] = p1->u.tensor->dim[i];
504 for (j = 0; j < p2->u.tensor->ndim; j++)
505 p3->u.tensor->dim[i + j] = p2->u.tensor->dim[j];
507 k = 0;
509 for (i = 0; i < p1->u.tensor->nelem; i++) {
510 p2 = p1->u.tensor->elem[i];
511 for (j = 0; j < p2->u.tensor->nelem; j++)
512 p3->u.tensor->elem[k++] = p2->u.tensor->elem[j];
515 push(p3);
517 restore();
520 static int
521 compatible(U *p, U *q)
523 int i;
525 if (!istensor(p) && !istensor(q))
526 return 1;
528 if (!istensor(p) || !istensor(q))
529 return 0;
531 if (p->u.tensor->ndim != q->u.tensor->ndim)
532 return 0;
534 for (i = 0; i < p->u.tensor->ndim; i++)
535 if (p->u.tensor->dim[i] != q->u.tensor->dim[i])
536 return 0;
538 return 1;
#if SELFTEST

// Self-test script: entries come in pairs — an input line followed by its
// expected output ("" for statements, such as assignments, that print nothing).
// NOTE(review): the "" entries sat on lines lost in extraction and were
// reconstructed from the pairing pattern — verify against upstream.

static char *s[] = {

	"#test_tensor",

	"a=(1,2,3)",
	"",

	"b=(4,5,6)",
	"",

	"c=(7,8,9)",
	"",

	"rank((a,b,c))",
	"2",

	"(a,b,c)",
	"((1,2,3),(4,5,6),(7,8,9))",

	// check tensor promotion

	"((1,0),(0,0))",
	"((1,0),(0,0))",

	"a=quote(a)",
	"",

	"b=quote(b)",
	"",

	"c=quote(c)",
	"",
};

void
test_tensor(void)
{
	test(__FILE__, s, sizeof s / sizeof (char *));
}

#endif