uct_search_stop_early(): Consider cannot-change break only after estimate gets sensible
[pachi/derm.git] / uct / search.c
blob 7402364f3ece0158b652c26265a13b1adf5cbc8d
#include <assert.h>
#include <math.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define DEBUG

#include "debug.h"
#include "distributed/distributed.h"
#include "move.h"
#include "random.h"
#include "timeinfo.h"
#include "uct/dynkomi.h"
#include "uct/internal.h"
#include "uct/search.h"
#include "uct/tree.h"
#include "uct/uct.h"
#include "uct/walk.h"

/* Default number of simulations to perform per move.
 * Note that this is now in total over all threads! */
#define MC_GAMES 80000

static const struct time_info default_ti = {
        .period = TT_MOVE,
        .dim = TD_GAMES,
        .len = { .games = MC_GAMES },
};

/* Once per how many simulations (per thread) to show a progress report line. */
#define TREE_SIMPROGRESS_INTERVAL 10000

/* When terminating UCT search early, the safety margin to add to the
 * remaining playout number estimate when deciding whether the result can
 * still change. */
#define PLAYOUT_DELTA_SAFEMARGIN 1000

/* Minimal number of simulations to consider early break. */
#define PLAYOUT_EARLY_BREAK_MIN 5000
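
/* Rough illustration of how the two constants above interact in
 * uct_search_stop_early() below (the numbers here are made up, not
 * measurements): with, say, 10 s of worst-case time remaining at about
 * 2000 playouts per second so far, the estimated number of playouts
 * still to come is
 *
 *      estplayouts = 10 * 2000 + PLAYOUT_DELTA_SAFEMARGIN = 21000
 *
 * so the cannot-change break fires only if the best move already leads
 * the second-best move by more than that, and even then only once at
 * least PLAYOUT_EARLY_BREAK_MIN playouts have been made, so that the
 * playouts-per-second estimate is sensible in the first place. */
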
/* Pachi threading structure:
 *
 * main thread
 *   |         main(), GTP communication, ...
 *   |         starts and stops the search managed by thread_manager
 *   |
 * thread_manager
 *   |         spawns and collects worker threads
 *   |
 * worker0
 * worker1
 * ...
 * workerK
 *             uct_playouts() loop, doing descend-playout until uct_halt
 *
 * Another way to look at it is by functions (lines denote thread boundaries):
 *
 * | uct_genmove()
 * | uct_search()            (uct_search_start() .. uct_search_stop())
 * | -----------------------
 * | spawn_thread_manager()
 * | -----------------------
 * | spawn_worker()
 * V uct_playouts() */
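
/* A rough caller-side sketch of driving this machinery (the real loop
 * lives outside this file, around uct_genmove()/uct_search(); treat
 * this as an assumed outline rather than a verbatim excerpt):
 *
 *      struct uct_search_state s;
 *      uct_search_start(u, b, color, t, ti, &s);
 *      while (!uct_halt) {
 *              // ...poll periodically...
 *              int i = uct_search_games(&s);
 *              uct_search_progress(u, b, color, t, ti, &s, i);
 *              if (uct_search_check_stop(u, b, color, t, ti, &s, i))
 *                      break;
 *      }
 *      struct uct_thread_ctx *ctx = uct_search_stop();
 */
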
/* Set in thread manager in case the workers should stop. */
volatile sig_atomic_t uct_halt = 0;
/* ID of the thread manager. */
static pthread_t thread_manager;
bool thread_manager_running;

static pthread_mutex_t finish_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t finish_cond = PTHREAD_COND_INITIALIZER;
static volatile int finish_thread;
static pthread_mutex_t finish_serializer = PTHREAD_MUTEX_INITIALIZER;

static void *
spawn_worker(void *ctx_)
{
        struct uct_thread_ctx *ctx = ctx_;
        /* Setup */
        fast_srandom(ctx->seed);
        /* Run */
        ctx->games = uct_playouts(ctx->u, ctx->b, ctx->color, ctx->t, ctx->ti);
        /* Finish */
        pthread_mutex_lock(&finish_serializer);
        pthread_mutex_lock(&finish_mutex);
        finish_thread = ctx->tid;
        pthread_cond_signal(&finish_cond);
        pthread_mutex_unlock(&finish_mutex);
        return ctx;
}
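
/* (Note on the handshake above: a finishing worker first takes
 * finish_serializer and only then announces its tid via finish_thread.
 * The thread manager releases finish_serializer only after joining that
 * worker, so at most one finished worker at a time can overwrite
 * finish_thread before the manager has consumed it.) */
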
/* Thread manager, controlling worker threads. It must be called with
 * finish_mutex lock held, but it will unlock it itself before exiting;
 * this is necessary to be completely deadlock-free. */
/* The finish_cond can be signalled for it to stop; in that case,
 * the caller should set finish_thread = -1. */
/* After it is started, it will update mctx->t to point at some tree
 * used for the actual search; on return, it will set mctx->games
 * to the number of performed simulations. */
static void *
spawn_thread_manager(void *ctx_)
{
        /* In thread_manager, we use only some of the ctx fields. */
        struct uct_thread_ctx *mctx = ctx_;
        struct uct *u = mctx->u;
        struct tree *t = mctx->t;
        fast_srandom(mctx->seed);

        int played_games = 0;
        pthread_t threads[u->threads];
        int joined = 0;

        uct_halt = 0;

        /* Garbage collect the tree by preference when pondering. */
        if (u->pondering && t->nodes && t->nodes_size >= t->pruning_threshold) {
                t->root = tree_garbage_collect(t, t->root);
        }

        /* Spawn threads... */
        for (int ti = 0; ti < u->threads; ti++) {
                struct uct_thread_ctx *ctx = malloc2(sizeof(*ctx));
                ctx->u = u; ctx->b = mctx->b; ctx->color = mctx->color;
                mctx->t = ctx->t = t;
                ctx->tid = ti; ctx->seed = fast_random(65536) + ti;
                ctx->ti = mctx->ti;
                pthread_create(&threads[ti], NULL, spawn_worker, ctx);
                if (UDEBUGL(3))
                        fprintf(stderr, "Spawned worker %d\n", ti);
        }

        /* ...and collect them back: */
        while (joined < u->threads) {
                /* Wait for some thread to finish... */
                pthread_cond_wait(&finish_cond, &finish_mutex);
                if (finish_thread < 0) {
                        /* Stop-by-caller. Tell the workers to wrap up. */
                        uct_halt = 1;
                        continue;
                }
                /* ...and gather its remnants. */
                struct uct_thread_ctx *ctx;
                pthread_join(threads[finish_thread], (void **) &ctx);
                played_games += ctx->games;
                joined++;
                free(ctx);
                if (UDEBUGL(3))
                        fprintf(stderr, "Joined worker %d\n", finish_thread);
                pthread_mutex_unlock(&finish_serializer);
        }

        pthread_mutex_unlock(&finish_mutex);

        mctx->games = played_games;
        return mctx;
}
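
/* (Descriptive note: a stop request (finish_thread < 0) merely raises
 * uct_halt; the manager keeps looping and still joins every worker before
 * unlocking finish_mutex and returning, which is why uct_search_stop()
 * below can simply pthread_join() the manager.) */
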
/*** THREAD MANAGER end */


/*** Search infrastructure: */

int
uct_search_games(struct uct_search_state *s)
{
        return s->ctx->t->root->u.playouts;
}

void
uct_search_start(struct uct *u, struct board *b, enum stone color,
                 struct tree *t, struct time_info *ti,
                 struct uct_search_state *s)
{
        /* Set up search state. */
        s->base_playouts = s->last_dynkomi = s->last_print = t->root->u.playouts;
        s->print_interval = TREE_SIMPROGRESS_INTERVAL * u->threads;
        s->fullmem = false;

        if (ti) {
                if (ti->period == TT_NULL) *ti = default_ti;
                time_stop_conditions(ti, b, u->fuseki_end, u->yose_start, &s->stop);
        }

        /* Fire up the tree search thread manager, which will in turn
         * spawn the searching threads. */
        assert(u->threads > 0);
        assert(!thread_manager_running);
        static struct uct_thread_ctx mctx;
        mctx = (struct uct_thread_ctx) { .u = u, .b = b, .color = color, .t = t, .seed = fast_random(65536), .ti = ti };
        s->ctx = &mctx;
        pthread_mutex_lock(&finish_mutex);
        pthread_create(&thread_manager, NULL, spawn_thread_manager, s->ctx);
        thread_manager_running = true;
}

struct uct_thread_ctx *
uct_search_stop(void)
{
        assert(thread_manager_running);

        /* Signal thread manager to stop the workers. */
        pthread_mutex_lock(&finish_mutex);
        finish_thread = -1;
        pthread_cond_signal(&finish_cond);
        pthread_mutex_unlock(&finish_mutex);

        /* Collect the thread manager. */
        struct uct_thread_ctx *pctx;
        thread_manager_running = false;
        pthread_join(thread_manager, (void **) &pctx);
        return pctx;
}

void
uct_search_progress(struct uct *u, struct board *b, enum stone color,
                    struct tree *t, struct time_info *ti,
                    struct uct_search_state *s, int i)
{
        struct uct_thread_ctx *ctx = s->ctx;

        /* Adjust dynkomi? */
        int di = u->dynkomi_interval * u->threads;
        if (ctx->t->use_extra_komi && u->dynkomi->permove
            && !u->pondering && di
            && i > s->last_dynkomi + di) {
                s->last_dynkomi += di;
                float old_dynkomi = ctx->t->extra_komi;
                ctx->t->extra_komi = u->dynkomi->permove(u->dynkomi, b, ctx->t);
                if (UDEBUGL(3) && old_dynkomi != ctx->t->extra_komi)
                        fprintf(stderr, "dynkomi adjusted (%f -> %f)\n",
                                old_dynkomi, ctx->t->extra_komi);
        }

        /* Print progress? */
        if (i - s->last_print > s->print_interval) {
                s->last_print += s->print_interval; // keep the numbers tidy
                uct_progress_status(u, ctx->t, color, s->last_print);
        }

        if (!s->fullmem && ctx->t->nodes_size > u->max_tree_size) {
                if (UDEBUGL(2))
                        fprintf(stderr, "memory limit hit (%lu > %lu)\n",
                                ctx->t->nodes_size, u->max_tree_size);
                s->fullmem = true;
        }
}
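
/* (Descriptive note: once s->fullmem is set above, uct_search_stop_early()
 * below returns true right away, so hitting the memory limit effectively
 * terminates the search at the next stop check.) */
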
/* Determine whether we should terminate the search early. */
static bool
uct_search_stop_early(struct uct *u, struct tree *t, struct board *b,
                      struct time_info *ti, struct time_stop *stop,
                      struct tree_node *best, struct tree_node *best2,
                      int played, bool fullmem)
{
        /* If the memory is full, stop immediately. Since the tree
         * cannot grow anymore, some non-well-expanded nodes will
         * quickly take over with extremely high ratio since the
         * counters are not properly simulated (just as if we use
         * non-UCT MonteCarlo). */
        /* (XXX: A proper solution would be to prune the tree
         * on the spot.) */
        if (fullmem)
                return true;

        /* Think at least 100ms to avoid a random move. This is particularly
         * important in distributed mode, where this function is called frequently. */
        double elapsed = 0.0;
        if (ti->dim == TD_WALLTIME) {
                elapsed = time_now() - ti->len.t.timer_start;
                if (elapsed < TREE_BUSYWAIT_INTERVAL) return false;
        }

        /* Break early if we estimate the second-best move cannot
         * catch up in assigned time anymore. We use all our time
         * if we are in byoyomi with single stone remaining in our
         * period, however - it's better to pre-ponder. */
        bool time_indulgent = (!ti->len.t.main_time && ti->len.t.byoyomi_stones == 1);
        if (best2 && ti->dim == TD_WALLTIME
            && played >= PLAYOUT_EARLY_BREAK_MIN && !time_indulgent) {
                double remaining = stop->worst.time - elapsed;
                double pps = ((double)played) / elapsed;
                double estplayouts = remaining * pps + PLAYOUT_DELTA_SAFEMARGIN;
                if (best->u.playouts > best2->u.playouts + estplayouts) {
                        if (UDEBUGL(2))
                                fprintf(stderr, "Early stop, result cannot change: "
                                        "best %d, best2 %d, estimated %f simulations to go (%d/%f=%f pps)\n",
                                        best->u.playouts, best2->u.playouts, estplayouts, played, elapsed, pps);
                        return true;
                }
        }

        /* Early break in won situation. */
        if (best->u.playouts >= PLAYOUT_EARLY_BREAK_MIN
            && tree_node_get_value(t, 1, best->u.value) >= u->sure_win_threshold) {
                return true;
        }

        return false;
}

/* Determine whether we should terminate the search later than expected. */
static bool
uct_search_keep_looking(struct uct *u, struct tree *t, struct board *b,
                        struct time_info *ti, struct time_stop *stop,
                        struct tree_node *best, struct tree_node *best2,
                        struct tree_node *bestr, struct tree_node *winner, int i)
{
        if (!best) {
                if (UDEBUGL(2))
                        fprintf(stderr, "Did not find best move, still trying...\n");
                return true;
        }

        /* Do not waste time if we are winning. Spend up to worst time if
         * we are unsure, but only desired time if we are sure of winning. */
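        /* (For illustration, with made-up numbers: a best-move value of 0.8
         * gives beta = 2 * (0.8 - 0.5) = 0.6, so good_enough below becomes
         * 0.6 * desired + 0.4 * worst; the surer we are of winning, the
         * closer to the desired time we stop.) */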
        float beta = 2 * (tree_node_get_value(t, 1, best->u.value) - 0.5);
        if (ti->dim == TD_WALLTIME && beta > 0) {
                double good_enough = stop->desired.time * beta + stop->worst.time * (1 - beta);
                double elapsed = time_now() - ti->len.t.timer_start;
                if (elapsed > good_enough) return false;
        }

        if (u->best2_ratio > 0) {
                /* Check best/best2 simulations ratio. If the
                 * two best moves give very similar results,
                 * keep simulating. */
                if (best2 && best2->u.playouts
                    && (double)best->u.playouts / best2->u.playouts < u->best2_ratio) {
                        if (UDEBUGL(2))
                                fprintf(stderr, "Best2 ratio %f < threshold %f\n",
                                        (double)best->u.playouts / best2->u.playouts,
                                        u->best2_ratio);
                        return true;
                }
        }

        if (u->bestr_ratio > 0) {
                /* Check best, best_best value difference. If the best move
                 * and its best child do not give similar enough results,
                 * keep simulating. */
                if (bestr && bestr->u.playouts
                    && fabs((double)best->u.value - bestr->u.value) > u->bestr_ratio) {
                        if (UDEBUGL(2))
                                fprintf(stderr, "Bestr delta %f > threshold %f\n",
                                        fabs((double)best->u.value - bestr->u.value),
                                        u->bestr_ratio);
                        return true;
                }
        }

        if (winner && winner != best) {
                /* Keep simulating if the most explored move does not
                 * also have the highest value. */
                if (UDEBUGL(2))
                        fprintf(stderr, "[%d] best %3s [%d] %f != winner %3s [%d] %f\n", i,
                                coord2sstr(best->coord, t->board),
                                best->u.playouts, tree_node_get_value(t, 1, best->u.value),
                                coord2sstr(winner->coord, t->board),
                                winner->u.playouts, tree_node_get_value(t, 1, winner->u.value));
                return true;
        }

        /* No reason to keep simulating, bye. */
        return false;
}

bool
uct_search_check_stop(struct uct *u, struct board *b, enum stone color,
                      struct tree *t, struct time_info *ti,
                      struct uct_search_state *s, int i)
{
        struct uct_thread_ctx *ctx = s->ctx;

        /* Never consider stopping if we have played too few simulations.
         * We may risk losing on time when playing under super-extreme
         * time pressure, but otherwise the tree is going to be just too
         * messed up - we might even play invalid suicides or pass
         * when we mustn't. */
        assert(!(ti->dim == TD_GAMES && ti->len.games < GJ_MINGAMES));
        if (i < GJ_MINGAMES)
                return false;

        struct tree_node *best = NULL;
        struct tree_node *best2 = NULL; // Second-best move.
        struct tree_node *bestr = NULL; // best's best child.
        struct tree_node *winner = NULL;

        best = u->policy->choose(u->policy, ctx->t->root, b, color, resign);
        if (best) best2 = u->policy->choose(u->policy, ctx->t->root, b, color, best->coord);

        /* Possibly stop the search early if it's of no use to keep trying. */
        int played = u->played_all + i - s->base_playouts;
        if (best && uct_search_stop_early(u, ctx->t, b, ti, &s->stop, best, best2, played, s->fullmem))
                return true;

        /* Check against time settings. */
        bool desired_done;
        if (ti->dim == TD_WALLTIME) {
                double elapsed = time_now() - ti->len.t.timer_start;
                if (elapsed > s->stop.worst.time) return true;
                desired_done = elapsed > s->stop.desired.time;

        } else { assert(ti->dim == TD_GAMES);
                if (i > s->stop.worst.playouts) return true;
                desired_done = i > s->stop.desired.playouts;
        }
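        /* (Descriptive note: stop.worst acts as a hard limit (exceeding it
         * stops the search unconditionally), while stop.desired only marks
         * the point from which uct_search_keep_looking() decides whether
         * spending more of the remaining budget is still worthwhile.) */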
        /* We want to stop simulating, but are willing to keep trying
         * if we aren't completely sure about the winner yet. */
        if (desired_done) {
                if (u->policy->winner && u->policy->evaluate) {
                        struct uct_descent descent = { .node = ctx->t->root };
                        u->policy->winner(u->policy, ctx->t, &descent);
                        winner = descent.node;
                }
                if (best)
                        bestr = u->policy->choose(u->policy, best, b, stone_other(color), resign);
                if (!uct_search_keep_looking(u, ctx->t, b, ti, &s->stop, best, best2, bestr, winner, i))
                        return true;
        }

        /* TODO: Early break if best->variance goes under threshold
         * and we already have enough playouts (possibly thanks to tbook
         * or to pondering)? */
        return false;
}

struct tree_node *
uct_search_result(struct uct *u, struct board *b, enum stone color,
                  bool pass_all_alive, int played_games, int base_playouts,
                  coord_t *best_coord)
{
        /* Choose the best move from the tree. */
        struct tree_node *best = u->policy->choose(u->policy, u->t->root, b, color, resign);
        if (!best) {
                *best_coord = pass;
                return NULL;
        }
        *best_coord = best->coord;
        if (UDEBUGL(1))
                fprintf(stderr, "*** WINNER is %s (%d,%d) with score %1.4f (%d/%d:%d/%d games), extra komi %f\n",
                        coord2sstr(best->coord, b), coord_x(best->coord, b), coord_y(best->coord, b),
                        tree_node_get_value(u->t, 1, best->u.value), best->u.playouts,
                        u->t->root->u.playouts, u->t->root->u.playouts - base_playouts, played_games,
                        u->t->extra_komi);

        /* Do not resign if we're so short of time that the evaluation of the
         * best move is completely unreliable - we might actually be winning.
         * In this case best is almost random, but still better than resigning.
         * Also do not resign if we are getting bad results while actually
         * giving away extra komi points (dynkomi). */
        if (tree_node_get_value(u->t, 1, best->u.value) < u->resign_threshold
            && !is_pass(best->coord) && best->u.playouts > GJ_MINGAMES
            && (!u->t->use_extra_komi || komi_by_color(u->t->extra_komi, color) < 0.5)) {
                *best_coord = resign;
                return NULL;
        }

        /* If the opponent just passed and we win counting, always
         * pass as well. */
        if (b->moves > 1 && is_pass(b->last_move.coord)) {
                /* Make sure enough playouts are simulated. */
                while (u->ownermap.playouts < GJ_MINGAMES)
                        uct_playout(u, b, color, u->t);
                if (uct_pass_is_safe(u, b, color, u->pass_all_alive || pass_all_alive)) {
                        if (UDEBUGL(0))
                                fprintf(stderr, "<Will rather pass, looks safe enough; score %f>\n",
                                        board_official_score(b, NULL) / 2);
                        *best_coord = pass;
                        best = u->t->root->children; // pass is the first child
                        assert(is_pass(best->coord));
                        return best;
                }
        }

        return best;
}