/* This is a master for the "distributed" engine. It receives connections
 * from slave machines, sends them gtp commands, then aggregates the
 * results. It can also act as a proxy for the logs of all slave machines.
 * The slave machines must run with engine "uct" (not "distributed").
 * The master sends pachi-genmoves gtp commands regularly to each slave,
 * gets as replies a list of nodes, their number of playouts
 * and their value. The master then picks the most popular move
 * among the top level nodes. */
/* With time control, the master waits for all slaves, except
 * when the allowed time has already passed. In this case the
 * master picks among the available replies, or waits for just
 * one reply if there is none yet.
 * Without time control, the master waits until the desired
 * number of games have been simulated. In this case the -t
 * parameter for the master should be the sum of the parameters
 * given to all slaves. */
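/* A rough illustration of that bookkeeping (numbers made up): a cluster of
 * 8 slaves each started with -t =10000 simulates about 80000 games per move
 * in total, so the master would be given -t =80000 to make its stopping
 * condition match the slaves' combined effort. */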
/* The master sends updated statistics for the best nodes in each
 * genmoves command. They are incremental updates from all other
 * slaves (so they exclude contributions from the target slave).
 * The slaves reply with just their own stats. So both master and
 * slave remember what was previously sent. A slave remembers in
 * the tree ("pu" field), which is stable across moves. The slave
 * also has a temporary hash table to map received coord paths
 * to tree nodes; the hash table is cleared at each new move.
 * The master remembers stats in a queue of received buffers that
 * are merged together, plus one hash table per slave. The master
 * queue and the hash tables are cleared at each new move. */
/* To allow the master to select the best move, slaves also send
 * absolute playout counts for the best top level nodes (children
 * of the root node), including contributions from other slaves.
 * The master sums these counts and picks the best sum, which is
 * equivalent to picking the best average. (The master cannot
 * use the incremental stats sent in binary form because they
 * are not maintained across moves, so playouts from previous
 * moves would be lost.) */
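/* (A sketch of why summing is enough: every candidate's average would be
 * its summed count divided by the same reply_count, so ranking by sum and
 * ranking by average pick the same move; select_best_move() below only
 * divides by reply_count afterwards, for reporting.) */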
/* The master-slave protocol has fault tolerance. If a slave is
 * out of sync, the master sends it the appropriate command history. */
/* Pass me arguments like a=b,c=d,...
 * Supported arguments:
 *   slave_port=SLAVE_PORT     slaves connect to this port; this parameter is mandatory.
 *   max_slaves=MAX_SLAVES     default 24
 *   shared_nodes=SHARED_NODES default 10K
 *   stats_hbits=STATS_HBITS   default 21. 2^stats_hbits = hash table size
 *   slaves_quit=0|1           quit gtp command also sent to slaves, default false.
 *   proxy_port=PROXY_PORT     slaves optionally send their logs to this port.
 *      Warning: with proxy_port, the master stderr mixes the logs of all
 *      machines but you can separate them again:
 *        slave logs:  sed -n '/< .*:/s/.*< /< /p' logfile
 *        master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile
 */
/* A configuration without proxy would have one master run on masterhost as:
 *    pachi -e distributed slave_port=1234
 * and N slaves running as:
 *    pachi -e uct -g masterhost:1234 slave
 * With the log proxy enabled, the same setup becomes:
 *    pachi -e distributed slave_port=1234,proxy_port=1235
 *    pachi -e uct -g masterhost:1234 -l masterhost:1235 slave
 * If the master itself runs on a machine other than that running gogui,
 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
 *    pachi -e distributed -g 10000 slave_port=1234,proxy_port=1235
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/types.h>
#include "distributed/distributed.h"
#include "distributed/merge.h"
/* Internal engine state. */
struct distributed {
        char *slave_port, *proxy_port;
        int max_slaves, shared_nodes, stats_hbits;
        bool slaves_quit;
        struct move my_last_move;
        struct move_stats my_last_stats;
};
/* Default number of simulations to perform per move.
 * Note that this is in total over all slaves! */
#define DIST_GAMES 80000

static const struct time_info default_ti = {
        .period = TT_MOVE,
        .dim = TD_GAMES,
        .len = { .games = DIST_GAMES },
};
#define get_value(value, color) \
        ((color) == S_BLACK ? (value) : 1 - (value))
/* Maximum time (seconds) to wait for answers to fast gtp commands
 * (all commands except pachi-genmoves and final_status_list). */
#define MAX_FAST_CMD_WAIT 0.5

/* Maximum time (seconds) to wait for answers to genmoves. */
#define MAX_GENMOVES_WAIT 0.1 /* 100 ms */

/* Minimum time (seconds) to wait before we stop early. This should
 * ensure that most slaves have replied at least once. */
#define MIN_EARLY_STOP_WAIT 0.3 /* 300 ms */
/* Display a path as leaf<parent<grandparent...
 * Returns the path string in a static buffer; it is NOT safe for
 * anything but debugging - in particular, it is NOT thread-safe! */
static char *
path2sstr(path_t path, struct board *b)
{
        /* Special case for pass and resign. */
        if (path < 0) return coord2sstr((coord_t)path, b);

        /* Rotate through a small pool of static buffers so that a few
         * calls can be mixed within one debug line. */
        static char buf[16][64];
        static int bi = 0;
        char *b2 = buf[bi++ & 15];
        char *s = b2;
        char *end = b2 + sizeof(buf[0]);
        *s = '\0';

        coord_t leaf;
        while ((leaf = leaf_coord(path, b)) != 0) {
                s += snprintf(s, end - s, "%s<", coord2sstr(leaf, b));
                path = parent_path(path, b);
        }
        /* Drop the trailing '<'. */
        if (s != b2) s[-1] = '\0';
        return b2;
}
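/* For example, a path whose leaf is c3, parent d4 and grandparent q16 is
 * printed as "c3<d4<q16" (coordinates illustrative). */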
/* Dispatch a new gtp command to all slaves.
 * The slave lock must not be held upon entry and is released upon return.
 * args is empty or ends with '\n' */
static enum parse_code
distributed_notify(struct engine *e, struct board *b, int id, char *cmd, char *args, char **reply)
{
        struct distributed *dist = e->data;

        /* Commands that should not be sent to slaves.
         * time_left will be part of next pachi-genmoves,
         * we reduce latency by not forwarding it here. */
        if ((!strcasecmp(cmd, "quit") && !dist->slaves_quit)
            || !strcasecmp(cmd, "pachi-gentbook")
            || !strcasecmp(cmd, "pachi-dumptbook")
            || !strcasecmp(cmd, "kgs-chat")
            || !strcasecmp(cmd, "time_left")

            /* and commands that will be sent to slaves later */
            || !strcasecmp(cmd, "genmove")
            || !strcasecmp(cmd, "kgs-genmove_cleanup")
            || !strcasecmp(cmd, "final_score")
            || !strcasecmp(cmd, "final_status_list"))
                return P_OK;

        protocol_lock();

        // Create a new command to be sent by the slave threads.
        new_cmd(b, cmd, args);

        /* Wait for replies here. If we don't wait, we run the
         * risk of getting out of sync with most slaves and
         * sending command history too frequently. But don't wait
         * for all slaves otherwise we can lose on time because of
         * a single slow slave when replaying a whole game. */
        int min_slaves = active_slaves > 1 ? 3 * active_slaves / 4 : 1;
        get_replies(time_now() + MAX_FAST_CMD_WAIT, min_slaves);

        protocol_unlock();

        // At the beginning wait even more for late slaves.
        if (b->moves == 0) sleep(1);
        return P_OK;
}
/* The playouts sent by slaves for the children of the root node
 * include contributions from other slaves. To avoid 32-bit overflow on
 * large configurations with many slaves we must average the playouts. */
struct large_stats {
        long playouts;          // # of playouts
        floating_t value;       // BLACK wins/playouts
};

static void
large_stats_add_result(struct large_stats *s, floating_t result, long playouts)
{
        s->playouts += playouts;
        s->value += (result - s->value) * playouts / s->playouts;
}
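/* A note on the update above: it keeps value equal to the playout-weighted
 * mean of all merged results without storing a separate win counter.
 * With N total playouts after adding a batch of n playouts whose mean
 * result is r,
 *   value_new = value_old + (r - value_old) * n / N
 *             = (value_old * (N - n) + r * n) / N.
 */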
/* genmoves returns "=id played_own total_playouts threads keep_looking @size"
 * then a list of lines "coord playouts value" with absolute counts for
 * children of the root node, then a binary array of incr_stats structs.
 * To simplify the code, we assume that master and slave have the same architecture
 * (store values identically).
 * Return the move with most playouts, and additional stats.
 * keep_looking is set from a majority vote of the slaves seen so far for this
 * move but should not be trusted if too few slaves have been seen.
 * Keep this code in sync with uct/slave.c:report_stats().
 * slave_lock is held on entry and on return. */
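/* So a single reply parsed below might look like this (all numbers are
 * made up for illustration; the trailing binary block is not shown):
 *   =5 3120 270000 8 1 @1792
 *   c3 61000 0.5312
 *   d16 45200 0.4876
 *   q4 38900 0.5021
 */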
static coord_t
select_best_move(struct board *b, struct large_stats *stats, int *played,
                 int *total_playouts, int *total_threads, bool *keep_looking)
{
        assert(reply_count > 0);

        /* +2 for pass and resign */
        memset(stats-2, 0, (board_size2(b)+2) * sizeof(*stats));

        coord_t best_move = pass;
        long best_playouts = -1;
        *played = 0;
        *total_playouts = 0;
        *total_threads = 0;
        int keep = 0;

        for (int reply = 0; reply < reply_count; reply++) {
                char *r = gtp_replies[reply];
                int id, o, p, t, k;
                if (sscanf(r, "=%d %d %d %d %d", &id, &o, &p, &t, &k) != 5) continue;
                *played += o;
                *total_playouts += p;
                *total_threads += t;
                keep += k;

                // Skip the rest of the first line, in particular @size
                r = strchr(r, '\n');

                char move[64];
                struct move_stats s;
                while (r && sscanf(++r, "%63s %d " PRIfloating, move, &s.playouts, &s.value) == 3) {
                        coord_t c = str2scoord(move, board_size(b));
                        assert (c >= resign && c < board_size2(b) && s.playouts >= 0);

                        large_stats_add_result(&stats[c], s.value, (long)s.playouts);

                        if (stats[c].playouts > best_playouts) {
                                best_playouts = stats[c].playouts;
                                best_move = c;
                        }
                        r = strchr(r, '\n');
                }
        }
        for (coord_t c = resign; c < board_size2(b); c++)
                stats[c].playouts /= reply_count;

        *keep_looking = keep > reply_count / 2;
        return best_move;
}
/* Set the args for the genmoves command. If binary_args is set,
 * each slave thread will add the correct binary size when sending
 * (see get_binary_arg()). args must have CMDS_SIZE bytes and
 * upon return ends with a single \n.
 * Keep this code in sync with uct/slave.c:uct_genmoves().
 * slave_lock is held on entry and on return but we don't
 * rely on the lock here. */
static void
genmoves_args(char *args, enum stone color, int played,
              struct time_info *ti, bool binary_args)
{
        char *end = args + CMDS_SIZE;
        char *s = args + snprintf(args, CMDS_SIZE, "%s %d", stone2str(color), played);

        if (ti->dim == TD_WALLTIME) {
                s += snprintf(s, end - s, " %.3f %.3f %d %d",
                              ti->len.t.main_time, ti->len.t.byoyomi_time,
                              ti->len.t.byoyomi_periods, ti->len.t.byoyomi_stones);
        }
        s += snprintf(s, end - s, binary_args ? " @0\n" : "\n");
}
/* Time control is mostly done by the slaves, so we use default values here. */
#define FUSEKI_END 20
#define YOSE_START 40
#define MAX_MAINTIME_RATIO 3.0
/* Regularly send genmoves command to the slaves, and select the best move. */
static coord_t *
distributed_genmove(struct engine *e, struct board *b, struct time_info *ti,
                    enum stone color, bool pass_all_alive)
{
        struct distributed *dist = e->data;
        double now = time_now();
        double first = now;
        char buf[BSIZE]; // debug only

        char *cmd = pass_all_alive ? "pachi-genmoves_cleanup" : "pachi-genmoves";
        char args[CMDS_SIZE];

        coord_t best;
        bool keep_looking;
        int played, playouts, threads;

        if (ti->period == TT_NULL) *ti = default_ti;
        struct time_stop stop;
        time_stop_conditions(ti, b, FUSEKI_END, YOSE_START, MAX_MAINTIME_RATIO, &stop);
        struct time_info saved_ti = *ti;

        /* Combined move stats from all slaves, only for children
         * of the root node, plus 2 for pass and resign. */
        struct large_stats stats_array[board_size2(b) + 2], *stats;
        stats = &stats_array[2];

        protocol_lock();
        clear_receive_queue();

        /* Send the first genmoves without stats. */
        genmoves_args(args, color, 0, ti, false);
        new_cmd(b, cmd, args);

        /* Loop until most slaves want to quit or time elapsed. */
        int iterations;
        for (iterations = 1; ; iterations++) {
                double start = now;
                /* Wait for just one slave to get stats as fresh as possible,
                 * or at most 100ms to check if we run out of time. */
                get_replies(now + MAX_GENMOVES_WAIT, 1);
                now = time_now();
                if (ti->dim == TD_WALLTIME)
                        time_sub(ti, now - start, false);

                best = select_best_move(b, stats, &played, &playouts, &threads, &keep_looking);

                if (ti->dim == TD_WALLTIME) {
                        if (now - ti->len.t.timer_start >= stop.worst.time) break;
                        if (!keep_looking && now - first >= MIN_EARLY_STOP_WAIT) break;
                } else {
                        if (!keep_looking || played >= stop.worst.playouts) break;
                }

                char *coord = coord2sstr(best, b);
                snprintf(buf, sizeof(buf),
                         "temp winner is %s %s with score %1.4f (%d/%d games)"
                         " %d slaves %d threads\n",
                         stone2str(color), coord, get_value(stats[best].value, color),
                         (int)stats[best].playouts, playouts, reply_count, threads);
                logline(NULL, "* ", buf);

                /* Send the command with the same gtp id, to avoid discarding
                 * a reply to a previous genmoves at the same move. */
                genmoves_args(args, color, played, ti, true);
                update_cmd(b, cmd, args, false);
        }
        int replies = reply_count;

        /* Do not subtract time spent twice (see gtp_parse). */
        *ti = saved_ti;

        dist->my_last_move.color = color;
        dist->my_last_move.coord = best;
        dist->my_last_stats.value = stats[best].value;
        dist->my_last_stats.playouts = (int)stats[best].playouts;

        /* Tell the slaves to commit to the selected move, overwriting
         * the last "pachi-genmoves" in the command history. */
        clear_receive_queue();
        char coordbuf[4];
        char *coord = coord2bstr(coordbuf, best, b);
        snprintf(args, sizeof(args), "%s %s\n", stone2str(color), coord);
        update_cmd(b, "play", args, true);
        protocol_unlock();

        double time = now - first + 0.000001; /* avoid divide by zero */
        snprintf(buf, sizeof(buf),
                 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
                 "genmove %d games in %0.2fs %d slaves %d threads (%d games/s,"
                 " %d games/s/slave, %d games/s/thread, %.3f ms/iter)\n",
                 stone2str(color), coord, get_value(stats[best].value, color),
                 (int)stats[best].playouts, playouts, played, time, replies, threads,
                 (int)(played/time), (int)(played/time/replies),
                 (int)(played/time/threads), 1000*time/iterations);
        logline(NULL, "* ", buf);

        int total_hnodes = replies * (1 << dist->stats_hbits);
        merge_print_stats(total_hnodes);

        return coord_copy(best);
}
static char *
distributed_chat(struct engine *e, struct board *b, char *cmd)
{
        struct distributed *dist = e->data;
        static char reply[BSIZE];

        cmd += strspn(cmd, " \n\t");
        if (!strncasecmp(cmd, "winrate", 7)) {
                enum stone color = dist->my_last_move.color;
                snprintf(reply, BSIZE, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
                         dist->my_last_stats.playouts, active_slaves, stone2str(color),
                         coord2sstr(dist->my_last_move.coord, b),
                         100 * get_value(dist->my_last_stats.value, color));
                return reply;
        }
        return NULL;
}
/* Comparison callback for qsort() over the array of reply strings. */
static int
scmp(const void *p1, const void *p2)
{
        return strcasecmp(*(char * const *)p1, *(char * const *)p2);
}
static void
distributed_dead_group_list(struct engine *e, struct board *b, struct move_queue *mq)
{
        protocol_lock();

        new_cmd(b, "final_status_list", "dead\n");
        get_replies(time_now() + MAX_FAST_CMD_WAIT, active_slaves);

        /* Find the most popular reply. */
        qsort(gtp_replies, reply_count, sizeof(char *), scmp);
        int best_reply = 0;
        int best_count = 1;
        int count = 1;
        for (int reply = 1; reply < reply_count; reply++) {
                if (!strcmp(gtp_replies[reply], gtp_replies[reply-1])) {
                        count++;
                } else {
                        count = 1;
                }
                if (count > best_count) {
                        best_count = count;
                        best_reply = reply;
                }
        }

        /* Pick the first move of each line as group. */
        char *dead = gtp_replies[best_reply];
        dead = strchr(dead, ' '); // skip "id "
        while (dead && *++dead != '\n') {
                mq_add(mq, str2scoord(dead, board_size(b)), 0);
                dead = strchr(dead, '\n');
        }

        protocol_unlock();
}
static struct distributed *
distributed_state_init(char *arg, struct board *b)
{
        struct distributed *dist = calloc2(1, sizeof(struct distributed));

        dist->stats_hbits = DEFAULT_STATS_HBITS;
        dist->max_slaves = DEFAULT_MAX_SLAVES;
        dist->shared_nodes = DEFAULT_SHARED_NODES;

        char *optspec, *next = arg;
        while (next && *next) {
                optspec = next;
                next += strcspn(next, ",");
                if (*next) { *next++ = 0; } else { *next = 0; }

                char *optname = optspec;
                char *optval = strchr(optspec, '=');
                if (optval) *optval++ = 0;

                if (!strcasecmp(optname, "slave_port") && optval) {
                        dist->slave_port = strdup(optval);
                } else if (!strcasecmp(optname, "proxy_port") && optval) {
                        dist->proxy_port = strdup(optval);
                } else if (!strcasecmp(optname, "max_slaves") && optval) {
                        dist->max_slaves = atoi(optval);
                } else if (!strcasecmp(optname, "shared_nodes") && optval) {
                        /* Share at most shared_nodes between master and slave at each genmoves.
                         * Must use the same value in master and slaves. */
                        dist->shared_nodes = atoi(optval);
                } else if (!strcasecmp(optname, "stats_hbits") && optval) {
                        /* Set hash table size to 2^stats_hbits for the shared stats. */
                        dist->stats_hbits = atoi(optval);
                } else if (!strcasecmp(optname, "slaves_quit")) {
                        dist->slaves_quit = !optval || atoi(optval);
                } else {
                        fprintf(stderr, "distributed: Invalid engine argument %s or missing value\n", optname);
                }
        }

        gtp_replies = calloc2(dist->max_slaves, sizeof(char *));

        if (!dist->slave_port) {
                fprintf(stderr, "distributed: missing slave_port\n");
                exit(1);
        }

        merge_init(&default_sstate, dist->shared_nodes, dist->stats_hbits, dist->max_slaves);
        protocol_init(dist->slave_port, dist->proxy_port, dist->max_slaves);

        return dist;
}
struct engine *
engine_distributed_init(char *arg, struct board *b)
{
        struct distributed *dist = distributed_state_init(arg, b);
        struct engine *e = calloc2(1, sizeof(struct engine));
        e->name = "Distributed";
        e->comment = "If you believe you have won but I am still playing, "
                "please help me understand by capturing all dead stones. "
                "Anyone can send me 'winrate' in private chat to get my assessment of the position.";
        e->notify = distributed_notify;
        e->genmove = distributed_genmove;
        e->dead_group_list = distributed_dead_group_list;
        e->chat = distributed_chat;
        e->data = dist;
        // Keep the threads and the open socket connections:
        e->keep_on_clear = true;

        return e;
}