Support absolute number of games for distributed engine.
[pachi/derm.git] / distributed / distributed.c
blobae2d7a98e9117e82ffba00807f7ecf76e0253cf9
1 /* This is a master for the "distributed" engine. It receives connections
2 * from slave machines, sends them gtp commands, then aggregates the
3 * results. It can also act as a proxy for the logs of all slave machines.
4 * The slave machines must run with engine "uct" (not "distributed").
5 * The master sends the pachi-genmoves gtp command to each slave,
6 * gets as replies a list of candidate moves, their number of playouts
7 * and their value. The master then picks the most popular move. */
9 /* With time control, the master waits for all slaves, except
10 * when the allowed time is already passed. In this case the
11 * master picks among the available replies, or waits for just
12 * one reply if there is none yet.
13 * Without time control, the master waits until the desired
14 * number of games have been simulated. In this case the -t
15 * parameter for the master should be the sum of the parameters
16 * for all slaves. */
18 /* This first version does not send tree updates between slaves,
19 * but it has fault tolerance. If a slave is out of sync, the master
20 * sends it the whole command history. */
22 /* Pass me arguments like a=b,c=d,...
23 * Supported arguments:
24 * slave_port=SLAVE_PORT slaves connect to this port; this parameter is mandatory.
25 * max_slaves=MAX_SLAVES default 100
26 * slaves_quit=0|1 quit gtp command also sent to slaves, default false.
27 * proxy_port=PROXY_PORT slaves optionally send their logs to this port.
28 * Warning: with proxy_port, the master stderr mixes the logs of all
29 * machines but you can separate them again:
30 * slave logs: sed -n '/< .*:/s/.*< /< /p' logfile
31 * master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile */
34 /* A configuration without proxy would have one master run on masterhost as:
35 * zzgo -e distributed slave_port=1234
36 * and N slaves running as:
37 * zzgo -e uct -g masterhost:1234 slave
38 * With log proxy:
39 * zzgo -e distributed slave_port=1234,proxy_port=1235
40 * zzgo -e uct -g masterhost:1234 -l masterhost:1235 slave
41 * If the master itself runs on a machine other than that running gogui,
42 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
43 * zzgo -e distributed -g 10000 slave_port=1234,proxy_port=1235 */
#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <pthread.h>
#include <alloca.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
59 #define DEBUG
61 #include "board.h"
62 #include "engine.h"
63 #include "move.h"
64 #include "timeinfo.h"
65 #include "network.h"
66 #include "playout.h"
67 #include "random.h"
68 #include "stats.h"
69 #include "mq.h"
70 #include "debug.h"
71 #include "distributed/distributed.h"
73 /* Internal engine state. */
74 struct distributed {
75 char *slave_port;
76 char *proxy_port;
77 int max_slaves;
78 bool slaves_quit;
79 struct move my_last_move;
80 struct move_stats my_last_stats;
83 static coord_t select_best_move(struct board *b, struct move_stats *best_stats,
84 int *total_playouts, int *total_threads);
86 /* Default number of simulations to perform per move.
87 * Note that this is in total over all slaves! */
88 #define DIST_GAMES 80000
89 static const struct time_info default_ti = {
90 .period = TT_MOVE,
91 .dim = TD_GAMES,
92 .len = { .games = DIST_GAMES },
95 #define get_value(value, color) \
96 ((color) == S_BLACK ? (value) : 1 - (value))
98 /* Max size for one reply or slave log. */
99 #define BSIZE 4096
101 /* Max size of all gtp commands for one game */
102 #define CMDS_SIZE (40*MAX_GAMELEN)
104 /* All gtp commands for current game separated by \n */
105 char gtp_cmds[CMDS_SIZE];
107 /* Latest gtp command sent to slaves. */
108 char *gtp_cmd = NULL;
110 /* Number of active slave machines working for this master. */
111 int active_slaves = 0;
113 /* Number of replies to last gtp command already received. */
114 int reply_count = 0;
116 /* All replies to latest gtp command are in gtp_replies[0..reply_count-1]. */
117 char **gtp_replies;
119 /* Mutex protecting gtp_cmds, gtp_cmd, active_slaves, reply_count & gtp_replies */
120 pthread_mutex_t slave_lock = PTHREAD_MUTEX_INITIALIZER;
122 /* Condition signaled when a new gtp command is available. */
123 static pthread_cond_t cmd_cond = PTHREAD_COND_INITIALIZER;
125 /* Condition signaled when reply_count increases. */
126 static pthread_cond_t reply_cond = PTHREAD_COND_INITIALIZER;
128 /* Mutex protecting stderr. Must not be held at same time as slave_lock. */
129 pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
131 /* Absolute time when this program was started.
132 * For debugging only. */
133 double start_time;
135 /* Write the time, client address, prefix, and string s to stderr atomically.
136 * s should end with a \n */
137 static void
138 logline(struct in_addr *client, char *prefix, char *s)
140 double now = time_now();
141 char addr[INET_ADDRSTRLEN];
142 if (client) {
143 inet_ntop(AF_INET, client, addr, sizeof(addr));
144 } else {
145 addr[0] = '\0';
147 pthread_mutex_lock(&log_lock);
148 fprintf(stderr, "%s%15s %9.3f: %s", prefix, addr, now - start_time, s);
149 pthread_mutex_unlock(&log_lock);
152 /* Thread opening a connection on the given socket and copying input
153 * from there to stderr. */
154 static void *
155 proxy_thread(void *arg)
157 int proxy_sock = (long)arg;
158 assert(proxy_sock >= 0);
159 for (;;) {
160 struct in_addr client;
161 int conn = open_server_connection(proxy_sock, &client);
162 FILE *f = fdopen(conn, "r");
163 char buf[BSIZE];
164 while (fgets(buf, BSIZE, f)) {
165 logline(&client, "< ", buf);
167 fclose(f);
171 /* Main loop of a slave thread.
172 * Send the current command to the slave machine and wait for a reply.
173 * Resend the whole command history if the slave machine is out of sync.
174 * Returns when the connection with the slave machine is cut.
175 * slave_lock is held on both entry and exit of this function. */
176 static void
177 slave_loop(FILE *f, struct in_addr client, char *buf, bool resend)
179 char *to_send = gtp_cmd;
180 int cmd_id = -1;
181 int reply_id = -1;
182 for (;;) {
183 while (cmd_id == reply_id && !resend) {
184 // Wait for a new gtp command.
185 pthread_cond_wait(&cmd_cond, &slave_lock);
186 if (gtp_cmd)
187 cmd_id = atoi(gtp_cmd);
188 to_send = gtp_cmd;
191 /* Command available, send it to slave machine.
192 * If slave was out of sync, send all the history. */
193 assert(to_send && gtp_cmd);
194 strncpy(buf, to_send, CMDS_SIZE);
195 cmd_id = atoi(gtp_cmd);
197 pthread_mutex_unlock(&slave_lock);
198 if (DEBUGL(2))
199 logline(&client, ">>", buf);
200 fputs(buf, f);
201 fflush(f);
203 /* Read the reply, which always ends with \n\n
204 * The slave machine sends "=id reply" or "?id reply"
205 * with id == cmd_id if it is in sync. */
206 *buf = '\0';
207 reply_id = -1;
208 char *line = buf;
209 while (fgets(line, buf + CMDS_SIZE - line, f) && *line != '\n') {
210 if (DEBUGL(2))
211 logline(&client, "<<", line);
212 if (reply_id < 0 && (*line == '=' || *line == '?') && isdigit(line[1]))
213 reply_id = atoi(line+1);
214 line += strlen(line);
217 pthread_mutex_lock(&slave_lock);
218 if (*line != '\n') return;
219 // Make sure we are still in sync:
220 cmd_id = atoi(gtp_cmd);
221 if (reply_id == cmd_id && *buf == '=') {
222 resend = false;
223 gtp_replies[reply_count++] = buf;
224 pthread_cond_signal(&reply_cond);
225 } else {
226 /* The slave was out of sync or had an incorrect board.
227 * Send the whole command history without wait.
228 * The slave will send a single reply with the
229 * id of the last command. */
230 to_send = gtp_cmds;
231 resend = true;
232 if (DEBUGL(1))
233 logline(&client, "? ", "Resending all history\n");
238 /* Thread sending gtp commands to one slave machine, and
239 * reading replies. If a slave machine dies, this thread waits
240 * for a connection from another slave. */
241 static void *
242 slave_thread(void *arg)
244 int slave_sock = (long)arg;
245 assert(slave_sock >= 0);
246 char slave_buf[CMDS_SIZE];
247 bool resend = false;
249 for (;;) {
250 /* Wait for a connection from any slave. */
251 struct in_addr client;
252 int conn = open_server_connection(slave_sock, &client);
254 FILE *f = fdopen(conn, "r+");
255 if (DEBUGL(2))
256 logline(&client, "= ", "new slave\n");
258 /* Minimal check of the slave identity. */
259 fputs("name\n", f);
260 if (!fgets(slave_buf, sizeof(slave_buf), f)
261 || strncasecmp(slave_buf, "= Pachi", 7)
262 || !fgets(slave_buf, sizeof(slave_buf), f)
263 || strcmp(slave_buf, "\n")) {
264 logline(&client, "? ", "bad slave\n");
265 fclose(f);
266 continue;
269 pthread_mutex_lock(&slave_lock);
270 active_slaves++;
271 slave_loop(f, client, slave_buf, resend);
273 assert(active_slaves > 0);
274 active_slaves--;
275 pthread_mutex_unlock(&slave_lock);
277 resend = true;
278 if (DEBUGL(2))
279 logline(&client, "= ", "lost slave\n");
280 fclose(f);
284 /* Create a new gtp command for all slaves. The slave lock is held
285 * upon entry and upon return, so the command will actually be
286 * sent when the lock is released. The last command is overwritten
287 * if gtp_cmd points to a non-empty string. cmd is a single word;
288 * args has all arguments and is empty or has a trailing \n */
289 static void
290 update_cmd(struct board *b, char *cmd, char *args)
292 assert(gtp_cmd);
293 /* To make sure the slaves are in sync, we ignore the original id
294 * and use the board number plus some random bits as gtp id.
295 * Make sure the new command has a new id otherwise slaves
296 * won't send it. */
297 static int gtp_id = -1;
298 int id;
299 int moves = is_reset(cmd) ? 0 : b->moves;
300 do {
301 /* fast_random() is 16-bit only so the multiplication can't overflow. */
302 id = force_reply(moves + fast_random(65535) * DIST_GAMELEN);
303 } while (id == gtp_id);
304 gtp_id = id;
305 snprintf(gtp_cmd, gtp_cmds + CMDS_SIZE - gtp_cmd, "%d %s %s",
306 id, cmd, *args ? args : "\n");
307 reply_count = 0;
310 /* If time_limit > 0, wait until all slaves have replied, or if the
311 * given absolute time is passed, wait for at least one reply.
312 * If time_limit == 0, wait until we get at least min_playouts games
313 * simulated in total by all the slaves, or until all slaves have replied.
314 * The replies are returned in gtp_replies[0..reply_count-1]
315 * slave_lock is held on entry and on return. */
316 static void
317 get_replies(double time_limit, int min_playouts, struct board *b)
319 while (reply_count == 0 || reply_count < active_slaves) {
320 if (time_limit && reply_count > 0) {
321 struct timespec ts;
322 double sec;
323 ts.tv_nsec = (int)(modf(time_limit, &sec)*1000000000.0);
324 ts.tv_sec = (int)sec;
325 pthread_cond_timedwait(&reply_cond, &slave_lock, &ts);
326 } else {
327 pthread_cond_wait(&reply_cond, &slave_lock);
329 if (reply_count == 0) continue;
330 if (reply_count >= active_slaves) break;
331 if (time_limit) {
332 if (time_now() >= time_limit) break;
333 } else {
334 int playouts, threads;
335 struct move_stats s;
336 select_best_move(b, &s, &playouts, &threads);
337 if (playouts >= min_playouts) break;
340 assert(reply_count > 0);
343 /* Maximum time (seconds) to wait for answers to fast gtp commands
344 * (all commands except pachi-genmoves and final_status_list). */
345 #define MAX_FAST_CMD_WAIT 1.0
347 /* Dispatch a new gtp command to all slaves.
348 * The slave lock must not be held upon entry and is released upon return.
349 * args is empty or ends with '\n' */
350 static enum parse_code
351 distributed_notify(struct engine *e, struct board *b, int id, char *cmd, char *args, char **reply)
353 struct distributed *dist = e->data;
355 if ((!strcasecmp(cmd, "quit") && !dist->slaves_quit)
356 || !strcasecmp(cmd, "uct_genbook")
357 || !strcasecmp(cmd, "uct_dumpbook")
358 || !strcasecmp(cmd, "kgs-chat"))
359 return P_OK;
361 pthread_mutex_lock(&slave_lock);
363 // Clear the history when a new game starts:
364 if (!gtp_cmd || is_gamestart(cmd)) {
365 gtp_cmd = gtp_cmds;
366 } else {
367 /* Preserve command history for new slaves.
368 * To indicate that the slave should only reply to
369 * the last command we force the id of previous
370 * commands to be just the move number. */
371 int id = prevent_reply(atoi(gtp_cmd));
372 int len = strspn(gtp_cmd, "0123456789");
373 char buf[32];
374 snprintf(buf, sizeof(buf), "%0*d", len, id);
375 memcpy(gtp_cmd, buf, len);
377 gtp_cmd += strlen(gtp_cmd);
380 if (!strcasecmp(cmd, "genmove")) {
381 cmd = "pachi-genmoves";
382 } else if (!strcasecmp(cmd, "kgs-genmove_cleanup")) {
383 cmd = "pachi-genmoves_cleanup";
384 } else if (!strcasecmp(cmd, "final_score")) {
385 cmd = "final_status_list";
388 // Let the slaves send the new gtp command:
389 update_cmd(b, cmd, args);
390 pthread_cond_broadcast(&cmd_cond);
392 /* Wait for replies here except for specific commands
393 * handled by the engine later. If we don't wait, we run
394 * the risk of getting out of sync with most slaves and
395 * sending complete command history too frequently. */
396 if (strcasecmp(cmd, "pachi-genmoves")
397 && strcasecmp(cmd, "pachi-genmoves_cleanup")
398 && strcasecmp(cmd, "final_status_list"))
399 get_replies(time_now() + MAX_FAST_CMD_WAIT, 0, b);
401 pthread_mutex_unlock(&slave_lock);
402 return P_OK;
405 /* pachi-genmoves returns a line "=id total_playouts threads[ reserved]" then a list of lines
406 * "coord playouts value". Keep this function in sync with uct_notify().
407 * Return the move with most playouts, its average value, and stats for debugging.
408 * slave_lock is held on entry and on return. */
409 static coord_t
410 select_best_move(struct board *b, struct move_stats *best_stats,
411 int *total_playouts, int *total_threads)
413 assert(reply_count > 0);
415 /* +2 for pass and resign. */
416 struct move_stats *stats = alloca((board_size2(b)+2) * sizeof(struct move_stats));
417 memset(stats, 0, (board_size2(b)+2) * sizeof(*stats));
418 stats += 2;
420 coord_t best_move = pass;
421 int best_playouts = -1;
422 *total_playouts = *total_threads = 0;
424 for (int reply = 0; reply < reply_count; reply++) {
425 char *r = gtp_replies[reply];
426 int id, playouts, threads;
427 if (sscanf(r, "=%d %d %d", &id, &playouts, &threads) != 3) continue;
428 *total_playouts += playouts;
429 *total_threads += threads;
430 // Skip the rest of the firt line if any (allow future extensions)
431 r = strchr(r, '\n');
433 char move[64];
434 struct move_stats s;
435 while (r && sscanf(++r, "%63s %d %f", move, &s.playouts, &s.value) == 3) {
436 coord_t *c = str2coord(move, board_size(b));
437 stats_add_result(&stats[*c], s.value, s.playouts);
438 if (stats[*c].playouts > best_playouts) {
439 best_playouts = stats[*c].playouts;
440 best_move = *c;
442 coord_done(c);
443 r = strchr(r, '\n');
446 *best_stats = stats[best_move];
447 return best_move;
450 /* Time control is mostly done by the slaves, so we use default values here. */
451 #define FUSEKI_END 20
452 #define YOSE_START 40
454 static coord_t *
455 distributed_genmove(struct engine *e, struct board *b, struct time_info *ti, enum stone color, bool pass_all_alive)
457 struct distributed *dist = e->data;
458 double start = time_now();
460 long time_limit = 0;
461 int min_playouts = 0;
463 if (ti->period == TT_NULL) *ti = default_ti;
464 struct time_stop stop;
465 time_stop_conditions(ti, b, FUSEKI_END, YOSE_START, &stop);
466 if (ti->dim == TD_WALLTIME) {
467 time_limit = ti->len.t.timer_start + stop.worst.time;
468 } else {
469 min_playouts = stop.desired.playouts;
472 pthread_mutex_lock(&slave_lock);
473 get_replies(time_limit, min_playouts, b);
474 int replies = reply_count;
476 int playouts, threads;
477 dist->my_last_move.color = color;
478 dist->my_last_move.coord = select_best_move(b, &dist->my_last_stats, &playouts, &threads);
480 /* Tell the slaves to commit to the selected move, overwriting
481 * the last "pachi-genmoves" in the command history. */
482 char args[64];
483 char *coord = coord2str(dist->my_last_move.coord, b);
484 snprintf(args, sizeof(args), "%s %s\n", stone2str(color), coord);
485 update_cmd(b, "play", args);
486 pthread_cond_broadcast(&cmd_cond);
487 pthread_mutex_unlock(&slave_lock);
489 if (DEBUGL(1)) {
490 char buf[BSIZE];
491 enum stone color = dist->my_last_move.color;
492 double time = time_now() - start + 0.000001; /* avoid divide by zero */
493 snprintf(buf, sizeof(buf),
494 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
495 "genmove in %0.2fs (%d games/s, %d games/s/slave, %d games/s/thread)\n",
496 stone2str(color), coord, get_value(dist->my_last_stats.value, color),
497 dist->my_last_stats.playouts, playouts, time,
498 (int)(playouts/time), (int)(playouts/time/replies),
499 (int)(playouts/time/threads));
500 logline(NULL, "*** ", buf);
502 free(coord);
503 return coord_copy(dist->my_last_move.coord);
506 static char *
507 distributed_chat(struct engine *e, struct board *b, char *cmd)
509 struct distributed *dist = e->data;
510 static char reply[BSIZE];
512 cmd += strspn(cmd, " \n\t");
513 if (!strncasecmp(cmd, "winrate", 7)) {
514 enum stone color = dist->my_last_move.color;
515 snprintf(reply, BSIZE, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
516 dist->my_last_stats.playouts, active_slaves, stone2str(color),
517 coord2sstr(dist->my_last_move.coord, b),
518 100 * get_value(dist->my_last_stats.value, color));
519 return reply;
521 return NULL;
/* qsort() comparator ordering C strings case-insensitively. */
static int
scmp(const void *p1, const void *p2)
{
	char * const *s1 = p1;
	char * const *s2 = p2;
	return strcasecmp(*s1, *s2);
}
530 static void
531 distributed_dead_group_list(struct engine *e, struct board *b, struct move_queue *mq)
533 pthread_mutex_lock(&slave_lock);
534 get_replies(time_now() + MAX_FAST_CMD_WAIT, 0, b);
536 /* Find the most popular reply. */
537 qsort(gtp_replies, reply_count, sizeof(char *), scmp);
538 int best_reply = 0;
539 int best_count = 1;
540 int count = 1;
541 for (int reply = 1; reply < reply_count; reply++) {
542 if (!strcmp(gtp_replies[reply], gtp_replies[reply-1])) {
543 count++;
544 } else {
545 count = 1;
547 if (count > best_count) {
548 best_count = count;
549 best_reply = reply;
553 /* Pick the first move of each line as group. */
554 char *dead = gtp_replies[best_reply];
555 dead = strchr(dead, ' '); // skip "id "
556 while (dead && *++dead != '\n') {
557 coord_t *c = str2coord(dead, board_size(b));
558 mq_add(mq, *c);
559 coord_done(c);
560 dead = strchr(dead, '\n');
562 pthread_mutex_unlock(&slave_lock);
565 static struct distributed *
566 distributed_state_init(char *arg, struct board *b)
568 struct distributed *dist = calloc(1, sizeof(struct distributed));
570 dist->max_slaves = 100;
571 if (arg) {
572 char *optspec, *next = arg;
573 while (*next) {
574 optspec = next;
575 next += strcspn(next, ",");
576 if (*next) { *next++ = 0; } else { *next = 0; }
578 char *optname = optspec;
579 char *optval = strchr(optspec, '=');
580 if (optval) *optval++ = 0;
582 if (!strcasecmp(optname, "slave_port") && optval) {
583 dist->slave_port = strdup(optval);
584 } else if (!strcasecmp(optname, "proxy_port") && optval) {
585 dist->proxy_port = strdup(optval);
586 } else if (!strcasecmp(optname, "max_slaves") && optval) {
587 dist->max_slaves = atoi(optval);
588 } else if (!strcasecmp(optname, "slaves_quit")) {
589 dist->slaves_quit = !optval || atoi(optval);
590 } else {
591 fprintf(stderr, "distributed: Invalid engine argument %s or missing value\n", optname);
596 gtp_replies = calloc(dist->max_slaves, sizeof(char *));
598 if (!dist->slave_port) {
599 fprintf(stderr, "distributed: missing slave_port\n");
600 exit(1);
602 int slave_sock = port_listen(dist->slave_port, dist->max_slaves);
603 pthread_t thread;
604 for (int id = 0; id < dist->max_slaves; id++) {
605 pthread_create(&thread, NULL, slave_thread, (void *)(long)slave_sock);
608 if (dist->proxy_port) {
609 int proxy_sock = port_listen(dist->proxy_port, dist->max_slaves);
610 for (int id = 0; id < dist->max_slaves; id++) {
611 pthread_create(&thread, NULL, proxy_thread, (void *)(long)proxy_sock);
614 return dist;
617 struct engine *
618 engine_distributed_init(char *arg, struct board *b)
620 start_time = time_now();
621 struct distributed *dist = distributed_state_init(arg, b);
622 struct engine *e = calloc(1, sizeof(struct engine));
623 e->name = "Distributed Engine";
624 e->comment = "I'm playing the distributed engine. When I'm losing, I will resign, "
625 "if I think I win, I play until you pass. "
626 "Anyone can send me 'winrate' in private chat to get my assessment of the position.";
627 e->notify = distributed_notify;
628 e->genmove = distributed_genmove;
629 e->dead_group_list = distributed_dead_group_list;
630 e->chat = distributed_chat;
631 e->data = dist;
632 // Keep the threads and the open socket connections:
633 e->keep_on_clear = true;
635 return e;