/*
 * Copyright (C) 2003 Robert Kooima
 *
 * NEVERBALL is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <stdlib.h>
#include <string.h>

#include "solid_vary.h"

#include "common.h"
#include "vec3.h"
/*---------------------------------------------------------------------------*/
23 int sol_load_vary(struct s_vary
*fp
, struct s_base
*base
)
27 memset(fp
, 0, sizeof (*fp
));
33 fp
->pv
= calloc(fp
->base
->pc
, sizeof (*fp
->pv
));
34 fp
->pc
= fp
->base
->pc
;
36 for (i
= 0; i
< fp
->base
->pc
; i
++)
38 struct v_path
*pp
= fp
->pv
+ i
;
39 struct b_path
*pq
= fp
->base
->pv
+ i
;
50 fp
->bv
= calloc(fp
->base
->bc
, sizeof (*fp
->bv
));
51 fp
->bc
= fp
->base
->bc
;
53 alloc_new(&mv
, sizeof (*fp
->mv
), (void **) &fp
->mv
, &fp
->mc
);
55 for (i
= 0; i
< fp
->base
->bc
; i
++)
57 struct b_body
*bbody
= fp
->base
->bv
+ i
;
58 struct v_body
*vbody
= fp
->bv
+ i
;
66 if (bbody
->pi
>= 0 && (vmove
= alloc_add(&mv
)))
68 memset(vmove
, 0, sizeof (*vmove
));
70 vbody
->mi
= fp
->mc
- 1;
71 vmove
->pi
= bbody
->pi
;
74 if (bbody
->pj
== bbody
->pi
)
76 vbody
->mj
= vbody
->mi
;
78 else if (bbody
->pj
>= 0 && (vmove
= alloc_add(&mv
)))
80 memset(vmove
, 0, sizeof (*vmove
));
82 vbody
->mj
= fp
->mc
- 1;
83 vmove
->pi
= bbody
->pj
;
90 fp
->hv
= calloc(fp
->base
->hc
, sizeof (*fp
->hv
));
91 fp
->hc
= fp
->base
->hc
;
93 for (i
= 0; i
< fp
->base
->hc
; i
++)
95 struct v_item
*hp
= fp
->hv
+ i
;
96 struct b_item
*hq
= fp
->base
->hv
+ i
;
107 fp
->xv
= calloc(fp
->base
->xc
, sizeof (*fp
->xv
));
108 fp
->xc
= fp
->base
->xc
;
110 for (i
= 0; i
< fp
->base
->xc
; i
++)
112 struct v_swch
*xp
= fp
->xv
+ i
;
113 struct b_swch
*xq
= fp
->base
->xv
+ i
;
124 fp
->uv
= calloc(fp
->base
->uc
, sizeof (*fp
->uv
));
125 fp
->uc
= fp
->base
->uc
;
127 for (i
= 0; i
< fp
->base
->uc
; i
++)
129 struct v_ball
*up
= fp
->uv
+ i
;
130 struct b_ball
*uq
= fp
->base
->uv
+ i
;
136 up
->E
[0][0] = up
->e
[0][0] = 1.0f
;
137 up
->E
[0][1] = up
->e
[0][1] = 0.0f
;
138 up
->E
[0][2] = up
->e
[0][2] = 0.0f
;
140 up
->E
[1][0] = up
->e
[1][0] = 0.0f
;
141 up
->E
[1][1] = up
->e
[1][1] = 1.0f
;
142 up
->E
[1][2] = up
->e
[1][2] = 0.0f
;
144 up
->E
[2][0] = up
->e
[2][0] = 0.0f
;
145 up
->E
[2][1] = up
->e
[2][1] = 0.0f
;
146 up
->E
[2][2] = up
->e
[2][2] = 1.0f
;
153 void sol_free_vary(struct s_vary
*fp
)
162 memset(fp
, 0, sizeof (*fp
));
/*---------------------------------------------------------------------------*/
167 int sol_vary_cmd(struct s_vary
*fp
, struct cmd_state
*cs
, const union cmd
*cmd
)
175 if ((up
= realloc(fp
->uv
, sizeof (*up
) * (fp
->uc
+ 1))))
178 cs
->curr_ball
= fp
->uc
;
184 case CMD_CLEAR_BALLS
:
/*---------------------------------------------------------------------------*/
202 int sol_lerp_cmd(struct s_lerp
*fp
, struct cmd_state
*cs
, const union cmd
*cmd
)
204 struct l_ball (*uv
)[2];
213 if ((uv
= realloc(fp
->uv
, sizeof (*uv
) * (fp
->uc
+ 1))))
217 /* Update varying state. */
219 if (sol_vary_cmd(fp
->vary
, cs
, cmd
))
228 if ((mi
= cmd
->movepath
.mi
) >= 0 && mi
< fp
->mc
)
230 /* Be extra paranoid. */
232 if ((idx
= cmd
->movepath
.pi
) >= 0 && idx
< fp
->vary
->base
->pc
)
233 fp
->mv
[mi
][CURR
].pi
= idx
;
238 if ((mi
= cmd
->movetime
.mi
) >= 0 && mi
< fp
->mc
)
240 fp
->mv
[mi
][CURR
].t
= cmd
->movetime
.t
;
245 /* Backward compatibility: update linear mover only. */
247 if ((idx
= cmd
->bodypath
.bi
) >= 0 && idx
< fp
->vary
->bc
&&
248 (mi
= fp
->vary
->bv
[idx
].mi
) >= 0)
250 /* Be EXTRA paranoid. */
252 if ((idx
= cmd
->bodypath
.pi
) >= 0 && idx
< fp
->vary
->base
->pc
)
253 fp
->mv
[mi
][CURR
].pi
= idx
;
258 /* Same as CMD_BODY_PATH. */
260 if ((idx
= cmd
->bodytime
.bi
) >= 0 && idx
< fp
->vary
->bc
&&
261 (mi
= fp
->vary
->bv
[idx
].mi
) >= 0)
263 fp
->mv
[mi
][CURR
].t
= cmd
->bodytime
.t
;
267 case CMD_BALL_RADIUS
:
268 fp
->uv
[cs
->curr_ball
][CURR
].r
= cmd
->ballradius
.r
;
271 case CMD_CLEAR_BALLS
:
276 sol_vary_cmd(fp
->vary
, cs
, cmd
);
280 case CMD_BALL_POSITION
:
281 up
= &fp
->uv
[cs
->curr_ball
][CURR
];
282 v_cpy(up
->p
, cmd
->ballpos
.p
);
286 up
= &fp
->uv
[cs
->curr_ball
][CURR
];
287 v_cpy(up
->e
[0], cmd
->ballbasis
.e
[0]);
288 v_cpy(up
->e
[1], cmd
->ballbasis
.e
[1]);
289 v_crs(up
->e
[2], up
->e
[0], up
->e
[1]);
292 case CMD_BALL_PEND_BASIS
:
293 up
= &fp
->uv
[cs
->curr_ball
][CURR
];
294 v_cpy(up
->E
[0], cmd
->ballpendbasis
.E
[0]);
295 v_cpy(up
->E
[1], cmd
->ballpendbasis
.E
[1]);
296 v_crs(up
->E
[2], up
->E
[0], up
->E
[1]);
299 case CMD_STEP_SIMULATION
:
301 * Step each mover ahead. This way we cut down on replay size
302 * significantly while still keeping things in sync with
303 * occasional CMD_MOVE_PATH and CMD_MOVE_TIME.
306 for (i
= 0; i
< fp
->mc
; i
++)
308 struct l_move
*mp
= &fp
->mv
[i
][CURR
];
310 if (mp
->pi
>= 0 && fp
->vary
->pv
[mp
->pi
].f
)
311 mp
->t
+= cmd
->stepsim
.dt
;
322 void sol_lerp_copy(struct s_lerp
*fp
)
326 for (i
= 0; i
< fp
->mc
; i
++)
327 fp
->mv
[i
][PREV
] = fp
->mv
[i
][CURR
];
329 for (i
= 0; i
< fp
->uc
; i
++)
330 fp
->uv
[i
][PREV
] = fp
->uv
[i
][CURR
];
333 void sol_lerp_apply(struct s_lerp
*fp
, float a
)
337 for (i
= 0; i
< fp
->mc
; i
++)
339 if (fp
->mv
[i
][PREV
].pi
== fp
->mv
[i
][CURR
].pi
)
340 fp
->vary
->mv
[i
].t
= flerp(fp
->mv
[i
][PREV
].t
, fp
->mv
[i
][CURR
].t
, a
);
342 fp
->vary
->mv
[i
].t
= fp
->mv
[i
][CURR
].t
* a
;
344 fp
->vary
->mv
[i
].pi
= fp
->mv
[i
][CURR
].pi
;
347 for (i
= 0; i
< fp
->uc
; i
++)
349 e_lerp(fp
->vary
->uv
[i
].e
, fp
->uv
[i
][PREV
].e
, fp
->uv
[i
][CURR
].e
, a
);
350 v_lerp(fp
->vary
->uv
[i
].p
, fp
->uv
[i
][PREV
].p
, fp
->uv
[i
][CURR
].p
, a
);
351 e_lerp(fp
->vary
->uv
[i
].E
, fp
->uv
[i
][PREV
].E
, fp
->uv
[i
][CURR
].E
, a
);
353 fp
->vary
->uv
[i
].r
= flerp(fp
->uv
[i
][PREV
].r
, fp
->uv
[i
][CURR
].r
, a
);
357 int sol_load_lerp(struct s_lerp
*fp
, struct s_vary
*vary
)
365 fp
->mv
= calloc(fp
->vary
->mc
, sizeof (*fp
->mv
));
366 fp
->mc
= fp
->vary
->mc
;
368 for (i
= 0; i
< fp
->vary
->mc
; i
++)
369 fp
->mv
[i
][CURR
].pi
= fp
->vary
->mv
[i
].pi
;
374 fp
->uv
= calloc(fp
->vary
->uc
, sizeof (*fp
->uv
));
375 fp
->uc
= fp
->vary
->uc
;
377 for (i
= 0; i
< fp
->vary
->uc
; i
++)
379 e_cpy(fp
->uv
[i
][CURR
].e
, fp
->vary
->uv
[i
].e
);
380 v_cpy(fp
->uv
[i
][CURR
].p
, fp
->vary
->uv
[i
].p
);
381 e_cpy(fp
->uv
[i
][CURR
].E
, fp
->vary
->uv
[i
].E
);
383 fp
->uv
[i
][CURR
].r
= fp
->vary
->uv
[i
].r
;
392 void sol_free_lerp(struct s_lerp
*fp
)
394 if (fp
->mv
) free(fp
->mv
);
395 if (fp
->uv
) free(fp
->uv
);
397 memset(fp
, 0, sizeof (*fp
));
/*---------------------------------------------------------------------------*/