#ifndef PACHI_STATS_H
#define PACHI_STATS_H

#include <math.h>
/* Move statistics; we track how good a value each move has. */
/* These operations are supposed to be atomic - reasonably
 * safe to perform by multiple threads at once on the same stats.
 * What this means in practice is that the value may end up
 * slightly wrong, but not drastically corrupted. */
struct move_stats {
	int playouts; // # of playouts
	floating_t value; // BLACK wins/playouts
};
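/* Note: floating_t is not defined in this header; pachi provides it
 * elsewhere in the tree (a float or double typedef, depending on the
 * build configuration), and includers are expected to have it in scope. */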
/* Add a result to the stats. */
static void stats_add_result(struct move_stats *s, floating_t result, int playouts);

/* Remove a result from the stats. */
static void stats_rm_result(struct move_stats *s, floating_t result, int playouts);

/* Merge two stats together. THIS IS NOT ATOMIC! */
static void stats_merge(struct move_stats *dest, struct move_stats *src);

/* Reverse stats parity. */
static void stats_reverse_parity(struct move_stats *s);
/* We actually achieve the atomicity in a pretty hackish way - we simply
 * rely on the fact that int and floating_t loads and stores should be
 * atomic with reasonable compilers (gcc) on reasonable architectures
 * (i386, x86_64). */
/* There is a write order dependency - by the time we bump the playouts,
 * our value must already be correct, otherwise a parallel reader could
 * pick up an invalid evaluation, especially when the current
 * s->playouts is zero. */
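/* A hypothetical reader illustrating why that store order matters:
 * it checks playouts before trusting value, so value must be written
 * first or a node with playouts > 0 could still expose a stale value. */
#if 0
static floating_t
example_node_value(const struct move_stats *s)
{
	if (s->playouts == 0)
		return 0.5;	/* made-up prior for an unvisited node */
	return s->value;	/* safe: value was stored before playouts */
}
#endif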
static inline void
stats_add_result(struct move_stats *s, floating_t result, int playouts)
{
	int s_playouts = s->playouts;
	floating_t s_value = s->value;
	/* Force the load, another thread can work on the
	 * values in parallel. */
	__sync_synchronize(); /* full memory barrier */

	s_playouts += playouts;
	s_value += (result - s_value) * playouts / s_playouts;

	/* We rely on the fact that these two assignments are atomic. */
	s->value = s_value;
	__sync_synchronize(); /* full memory barrier */
	s->playouts = s_playouts;
}
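/* The update above is the standard incremental mean: with P prior
 * playouts of mean V and p new playouts of mean R, the combined mean is
 * (P*V + p*R) / (P + p) = V + (R - V) * p / (P + p), so no separate
 * win-count accumulator is needed. */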
static inline void
stats_rm_result(struct move_stats *s, floating_t result, int playouts)
{
	if (s->playouts > playouts) {
		int s_playouts = s->playouts;
		floating_t s_value = s->value;
		/* Force the load, another thread can work on the
		 * values in parallel. */
		__sync_synchronize(); /* full memory barrier */

		s_playouts -= playouts;
		s_value += (s_value - result) * playouts / s_playouts;

		/* We rely on the fact that these two assignments are atomic. */
		s->value = s_value;
		__sync_synchronize(); /* full memory barrier */
		s->playouts = s_playouts;

	} else {
		/* We don't touch the value, since in parallel, another
		 * thread can be adding a result, thus raising the
		 * playouts count after we zero the value. Instead,
		 * leaving the value as is with zero playouts should
		 * not break anything. */
		s->playouts = 0;
	}
}
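/* stats_rm_result() above inverts that identity: removing p playouts of
 * mean R from P playouts of mean V leaves
 * (P*V - p*R) / (P - p) = V + (V - R) * p / (P - p). */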
static inline void
stats_merge(struct move_stats *dest, struct move_stats *src)
{
	/* In a sense, this is a non-atomic version of stats_add_result(). */
	if (src->playouts) {
		dest->playouts += src->playouts;
		dest->value += (src->value - dest->value) * src->playouts / dest->playouts;
	}
}
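/* Since stats_merge() is not atomic, callers presumably need exclusive
 * access to dest - e.g. folding thread-local stats into a shared node
 * in a single-threaded phase or under a lock. */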
static inline void
stats_reverse_parity(struct move_stats *s)
{
	s->value = 1 - s->value;
}
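/* Since value is BLACK's win rate, 1 - value is WHITE's win rate;
 * reversing parity re-expresses the same stats from the opponent's
 * point of view. */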
#endif