/* This is a master for the "distributed" engine. It receives connections
 * from slave machines, sends them gtp commands, then aggregates the
 * results. It can also act as a proxy for the logs of all slave machines.
 * The slave machines must run with engine "uct" (not "distributed").
 * The master sends pachi-genmoves gtp commands regularly to each slave,
 * gets as replies a list of candidate moves, their number of playouts
 * and their value. The master then picks the most popular move. */

/* With time control, the master waits for all slaves, except
 * when the allowed time is already passed. In this case the
 * master picks among the available replies, or waits for just
 * one reply if there is none yet.
 * Without time control, the master waits until the desired
 * number of games have been simulated. In this case the -t
 * parameter for the master should be the sum of the parameters
 * for all slaves. */

/* The master sends updated statistics for the best moves
 * in each genmoves command. In this version only the
 * children of the root node are updated. The slaves
 * reply with just their own stats; they remember what was
 * previously received from or sent to the master, to
 * distinguish their own contribution from that of other slaves. */

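/* Illustrative exchange (framing as built by genmoves_args() and parsed
 * by select_best_move() below; the id and all numbers are made up, and the
 * exact color and coordinate tokens come from stone2str() and coord2sstr()):
 *   master -> slave:  1037 pachi-genmoves black 1600 25.000 5.000 1 0
 *                     Q16 1742 0.4375000
 *                     ... one "coord playouts value" line per candidate,
 *                     then an empty line
 *   slave -> master:  =1037 220 5100 4 1
 *                     Q16 1812 0.4411201
 *                     ... the slave's own stats, then an empty line */
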
/* The master-slave protocol has fault tolerance. If a slave is
 * out of sync, the master sends it the appropriate command history. */

/* Pass me arguments like a=b,c=d,...
 * Supported arguments:
 * slave_port=SLAVE_PORT  slaves connect to this port; this parameter is mandatory.
 * max_slaves=MAX_SLAVES  default 100
 * slaves_quit=0|1        quit gtp command also sent to slaves, default false.
 * proxy_port=PROXY_PORT  slaves optionally send their logs to this port.
 *    Warning: with proxy_port, the master stderr mixes the logs of all
 *    machines but you can separate them again:
 *      slave logs:  sed -n '/< .*:/s/.*< /< /p' logfile
 *      master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile
 */

/* A configuration without proxy would have one master run on masterhost as:
 *    zzgo -e distributed slave_port=1234
 * and N slaves running as:
 *    zzgo -e uct -g masterhost:1234 slave
 * With log proxy:
 *    zzgo -e distributed slave_port=1234,proxy_port=1235
 *    zzgo -e uct -g masterhost:1234 -l masterhost:1235 slave
 * If the master itself runs on a machine other than that running gogui,
 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
 *    zzgo -e distributed -g 10000 slave_port=1234,proxy_port=1235
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include <ctype.h>
#include <math.h>
#include <time.h>
#include <alloca.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define DEBUG

#include "board.h"
#include "engine.h"
#include "move.h"
#include "timeinfo.h"
#include "network.h"
#include "playout.h"
#include "random.h"
#include "stats.h"
#include "mq.h"
#include "debug.h"
#include "distributed/distributed.h"

/* Internal engine state. */
struct distributed {
	char *slave_port;
	char *proxy_port;
	int max_slaves;
	bool slaves_quit;
	struct move my_last_move;
	struct move_stats my_last_stats;
};

/* Default number of simulations to perform per move.
 * Note that this is in total over all slaves! */
#define DIST_GAMES 80000
static const struct time_info default_ti = {
	.period = TT_MOVE,
	.dim = TD_GAMES,
	.len = { .games = DIST_GAMES },
};

#define get_value(value, color) \
	((color) == S_BLACK ? (value) : 1 - (value))

/* Max size for one line of reply or slave log. */
#define BSIZE 4096

/* Max size of all gtp commands for one game.
 * 60 chars for the first line of genmoves plus 100 lines
 * of 30 chars each for the stats at last move. */
#define CMDS_SIZE (60*MAX_GAMELEN + 30*100)

/* All gtp commands for current game separated by \n */
static char gtp_cmds[CMDS_SIZE];

/* Latest gtp command sent to slaves. */
static char *gtp_cmd = NULL;

/* Slaves send gtp_cmd when cmd_count changes. */
static int cmd_count = 0;

/* Remember at most 12 gtp ids per move: play pass,
 * 10 genmoves (1s), play pass.
 * For move 0 we always resend the whole history. */
#define MAX_CMDS_PER_MOVE 12

/* History of gtp commands sent for current game, indexed by move. */
static int id_history[MAX_GAMELEN][MAX_CMDS_PER_MOVE];
static char *cmd_history[MAX_GAMELEN][MAX_CMDS_PER_MOVE];

/* Number of active slave machines working for this master. */
static int active_slaves = 0;

/* Number of replies to last gtp command already received. */
static int reply_count = 0;

/* All replies to latest gtp command are in gtp_replies[0..reply_count-1]. */
static char **gtp_replies;

/* Mutex protecting gtp_cmds, gtp_cmd, id_history, cmd_history,
 * cmd_count, active_slaves, reply_count & gtp_replies */
static pthread_mutex_t slave_lock = PTHREAD_MUTEX_INITIALIZER;

/* Condition signaled when a new gtp command is available. */
static pthread_cond_t cmd_cond = PTHREAD_COND_INITIALIZER;

/* Condition signaled when reply_count increases. */
static pthread_cond_t reply_cond = PTHREAD_COND_INITIALIZER;

/* Mutex protecting stderr. Must not be held at same time as slave_lock. */
static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Absolute time when this program was started.
 * For debugging only. */
static double start_time;

/* Write the time, client address, prefix, and string s to stderr atomically.
 * s should end with a \n */
static void
logline(struct in_addr *client, char *prefix, char *s)
{
	double now = time_now();
	char addr[INET_ADDRSTRLEN];
	if (client) {
		inet_ntop(AF_INET, client, addr, sizeof(addr));
	} else {
		addr[0] = '\0';
	}
	pthread_mutex_lock(&log_lock);
	fprintf(stderr, "%s%15s %9.3f: %s", prefix, addr, now - start_time, s);
	pthread_mutex_unlock(&log_lock);
}

/* Thread opening a connection on the given socket and copying input
 * from there to stderr. */
static void *
proxy_thread(void *arg)
{
	int proxy_sock = (long)arg;
	assert(proxy_sock >= 0);
	for (;;) {
		struct in_addr client;
		int conn = open_server_connection(proxy_sock, &client);
		FILE *f = fdopen(conn, "r");
		char buf[BSIZE];
		while (fgets(buf, BSIZE, f)) {
			logline(&client, "< ", buf);
		}
		fclose(f);
	}
}

/* Get a reply to one gtp command. Return the gtp command id,
 * or -1 if error. reply must have at least CMDS_SIZE bytes.
 * slave_lock is not held on either entry or exit of this function. */
static int
get_reply(FILE *f, struct in_addr client, char *reply)
{
	int reply_id = -1;
	*reply = '\0';
	char *line = reply;
	while (fgets(line, reply + CMDS_SIZE - line, f) && *line != '\n') {
		if (DEBUGL(3) || (DEBUGL(2) && line == reply))
			logline(&client, "<<", line);
		if (reply_id < 0 && (*line == '=' || *line == '?') && isdigit(line[1]))
			reply_id = atoi(line+1);
		line += strlen(line);
	}
	if (*line != '\n') return -1;
	return reply_id;
}

/* Main loop of a slave thread.
 * Send the current command to the slave machine and wait for a reply.
 * Resend command history if the slave machine is out of sync.
 * Returns when the connection with the slave machine is cut.
 * slave_lock is held on both entry and exit of this function. */
static void
slave_loop(FILE *f, struct in_addr client, char *reply_buf, bool resend)
{
	char *to_send = gtp_cmd;
	int last_cmd_sent = 0;
	int last_reply_id = -1;
	int reply_slot = -1;
	for (;;) {
		while (last_cmd_sent == cmd_count && !resend) {
			// Wait for a new gtp command.
			pthread_cond_wait(&cmd_cond, &slave_lock);
			to_send = gtp_cmd;
		}

		/* Command available, send it to slave machine.
		 * If slave was out of sync, send the history. */
		assert(to_send && gtp_cmd);
		char buf[CMDS_SIZE];
		strncpy(buf, to_send, CMDS_SIZE);
		last_cmd_sent = cmd_count;

		pthread_mutex_unlock(&slave_lock);

		if (DEBUGL(1) && resend) {
			if (to_send == gtp_cmds) {
				logline(&client, "? ", "Slave out-of-sync, resending all history\n");
			} else {
				logline(&client, "? ", "Slave behind, partial resend\n");
			}
		}
		fputs(buf, f);
		fflush(f);
		if (DEBUGL(2)) {
			if (!DEBUGL(3)) {
				char *s = strchr(buf, '\n');
				if (s) s[1] = '\0';
			}
			logline(&client, ">>", buf);
		}

		/* Read the reply, which always ends with \n\n
		 * The slave machine sends "=id reply" or "?id reply"
		 * with id == cmd_id if it is in sync. */
		int reply_id = get_reply(f, client, buf);

		pthread_mutex_lock(&slave_lock);
		if (reply_id == -1) return;

		/* Make sure we are still in sync. cmd_count may have
		 * changed but the reply is valid as long as cmd_id didn't
		 * change (this only occurs for consecutive genmoves). */
		int cmd_id = atoi(gtp_cmd);
		if (reply_id == cmd_id && *buf == '=') {
			resend = false;
			strncpy(reply_buf, buf, CMDS_SIZE);
			if (reply_id != last_reply_id)
				reply_slot = reply_count++;
			gtp_replies[reply_slot] = reply_buf;
			last_reply_id = reply_id;

			pthread_cond_signal(&reply_cond);

			/* Force waiting for a new command. The next genmoves
			 * stats we will send must include those just received
			 * (this is assumed by the slave). */
			last_cmd_sent = cmd_count;
			continue;
		}
		resend = true;
		to_send = gtp_cmds;
		/* Resend everything if slave got latest command,
		 * but doesn't have a correct board. */
		if (reply_id == cmd_id) continue;

		/* The slave is out-of-sync. Check whether the last command
		 * it received belongs to the current game. If so resend
		 * starting at the last move known by slave, otherwise
		 * resend the whole history. */
		int reply_move = move_number(reply_id);
		if (reply_move > move_number(cmd_id)) continue;

		for (int slot = 0; slot < MAX_CMDS_PER_MOVE; slot++) {
			if (reply_id == id_history[reply_move][slot]) {
				to_send = cmd_history[reply_move][slot];
				break;
			}
		}
	}
}

/* Thread sending gtp commands to one slave machine, and
 * reading replies. If a slave machine dies, this thread waits
 * for a connection from another slave. */
static void *
slave_thread(void *arg)
{
	int slave_sock = (long)arg;
	assert(slave_sock >= 0);
	char reply_buf[CMDS_SIZE];
	bool resend = false;

	for (;;) {
		/* Wait for a connection from any slave. */
		struct in_addr client;
		int conn = open_server_connection(slave_sock, &client);

		FILE *f = fdopen(conn, "r+");
		if (DEBUGL(2))
			logline(&client, "= ", "new slave\n");

		/* Minimal check of the slave identity. */
		fputs("name\n", f);
		if (!fgets(reply_buf, sizeof(reply_buf), f)
		    || strncasecmp(reply_buf, "= Pachi", 7)
		    || !fgets(reply_buf, sizeof(reply_buf), f)
		    || strcmp(reply_buf, "\n")) {
			logline(&client, "? ", "bad slave\n");
			fclose(f);
			continue;
		}

		pthread_mutex_lock(&slave_lock);
		active_slaves++;
		slave_loop(f, client, reply_buf, resend);

		assert(active_slaves > 0);
		active_slaves--;
		// Unblock main thread if it was waiting for this slave.
		pthread_cond_signal(&reply_cond);
		pthread_mutex_unlock(&slave_lock);

		resend = true;
		if (DEBUGL(2))
			logline(&client, "= ", "lost slave\n");
		fclose(f);
	}
}

/* Create a new gtp command for all slaves. The slave lock is held
 * upon entry and upon return, so the command will actually be
 * sent when the lock is released. The last command is overwritten
 * if gtp_cmd points to a non-empty string. cmd is a single word;
 * args has all arguments and is empty or has a trailing \n */
static void
update_cmd(struct board *b, char *cmd, char *args, bool new_id)
{
	assert(gtp_cmd);
	/* To make sure the slaves are in sync, we ignore the original id
	 * and use the board number plus some random bits as gtp id. */
	static int gtp_id = -1;
	int moves = is_reset(cmd) ? 0 : b->moves;
	if (new_id) {
		/* fast_random() is 16-bit only so the multiplication can't overflow. */
		gtp_id = force_reply(moves + fast_random(65535) * DIST_GAMELEN);
		reply_count = 0;
	}
	snprintf(gtp_cmd, gtp_cmds + CMDS_SIZE - gtp_cmd, "%d %s %s",
		 gtp_id, cmd, *args ? args : "\n");
	cmd_count++;

	/* Remember history for out-of-sync slaves. */
	static int slot = 0;
	slot = (slot + 1) % MAX_CMDS_PER_MOVE;
	id_history[moves][slot] = gtp_id;
	cmd_history[moves][slot] = gtp_cmd;

	// Notify the slave threads about the new command.
	pthread_cond_broadcast(&cmd_cond);
}

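/* Worked example of the id scheme above (values made up): at move 37,
 * fast_random() returning 1234 yields an id built from
 * 37 + 1234 * DIST_GAMELEN, marked by force_reply() so that slaves must
 * answer it. slave_loop() later recovers the move with move_number(reply_id)
 * to look up id_history/cmd_history, and new_cmd() applies prevent_reply()
 * so that ids of commands that became history no longer request a reply. */
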
/* Update the command history, then create a new gtp command
 * for all slaves. The slave lock is held upon entry and
 * upon return, so the command will actually be sent when the
 * lock is released. cmd is a single word; args has all
 * arguments and is empty or has a trailing \n */
static void
new_cmd(struct board *b, char *cmd, char *args)
{
	// Clear the history when a new game starts:
	if (!gtp_cmd || is_gamestart(cmd)) {
		gtp_cmd = gtp_cmds;
	} else {
		/* Preserve command history for new slaves.
		 * To indicate that the slave should only reply to
		 * the last command we force the id of previous
		 * commands to be just the move number. */
		int id = prevent_reply(atoi(gtp_cmd));
		int len = strspn(gtp_cmd, "0123456789");
		char buf[32];
		snprintf(buf, sizeof(buf), "%0*d", len, id);
		memcpy(gtp_cmd, buf, len);

		gtp_cmd += strlen(gtp_cmd);
	}

	// Let the slave threads send the new gtp command:
	update_cmd(b, cmd, args, true);
}

/* Wait for at least one new reply. Return when all slaves have
 * replied, or when the given absolute time is passed.
 * The replies are returned in gtp_replies[0..reply_count-1]
 * slave_lock is held on entry and on return. */
static void
get_replies(double time_limit)
{
	for (;;) {
		if (reply_count > 0) {
			struct timespec ts;
			double sec;
			ts.tv_nsec = (int)(modf(time_limit, &sec)*1000000000.0);
			ts.tv_sec = (int)sec;
			pthread_cond_timedwait(&reply_cond, &slave_lock, &ts);
		} else {
			pthread_cond_wait(&reply_cond, &slave_lock);
		}
		if (reply_count == 0) continue;
		if (reply_count >= active_slaves) return;
		if (time_now() >= time_limit) break;
	}
	if (DEBUGL(1)) {
		char buf[1024];
		snprintf(buf, sizeof(buf),
			 "get_replies timeout %.3f >= %.3f, replies %d < active %d\n",
			 time_now() - start_time, time_limit - start_time,
			 reply_count, active_slaves);
		logline(NULL, "? ", buf);
	}
	assert(reply_count > 0);
}

/* Maximum time (seconds) to wait for answers to fast gtp commands
 * (all commands except pachi-genmoves and final_status_list). */
#define MAX_FAST_CMD_WAIT 1.0

/* How often to send a stats update to slaves (seconds). */
#define STATS_UPDATE_INTERVAL 0.1 /* 100ms */

/* Dispatch a new gtp command to all slaves.
 * The slave lock must not be held upon entry and is released upon return.
 * args is empty or ends with '\n' */
static enum parse_code
distributed_notify(struct engine *e, struct board *b, int id, char *cmd, char *args, char **reply)
{
	struct distributed *dist = e->data;

	/* Commands that should not be sent to slaves.
	 * time_left will be part of next pachi-genmoves,
	 * we reduce latency by not forwarding it here. */
	if ((!strcasecmp(cmd, "quit") && !dist->slaves_quit)
	    || !strcasecmp(cmd, "uct_genbook")
	    || !strcasecmp(cmd, "uct_dumpbook")
	    || !strcasecmp(cmd, "kgs-chat")
	    || !strcasecmp(cmd, "time_left")

	    /* and commands that will be sent to slaves later */
	    || !strcasecmp(cmd, "genmove")
	    || !strcasecmp(cmd, "kgs-genmove_cleanup")
	    || !strcasecmp(cmd, "final_score")
	    || !strcasecmp(cmd, "final_status_list"))
		return P_OK;

	pthread_mutex_lock(&slave_lock);

	// Create a new command to be sent by the slave threads.
	new_cmd(b, cmd, args);

	/* Wait for replies here. If we don't wait, we run the
	 * risk of getting out of sync with most slaves and
	 * sending command history too frequently. */
	get_replies(time_now() + MAX_FAST_CMD_WAIT);

	pthread_mutex_unlock(&slave_lock);
	return P_OK;
}

/* genmoves returns a line "=id played_own total_playouts threads keep_looking[ reserved]"
 * then a list of lines "coord playouts value".
 * Return the move with most playouts, and additional stats.
 * Keep this code in sync with uct_getstats().
 * slave_lock is held on entry and on return. */
static coord_t
select_best_move(struct board *b, struct move_stats *stats, int *played,
		 int *total_playouts, int *total_threads, bool *keep_looking)
{
	assert(reply_count > 0);

	/* +2 for pass and resign */
	memset(stats-2, 0, (board_size2(b)+2) * sizeof(*stats));

	coord_t best_move = pass;
	int best_playouts = -1;
	*played = 0;
	*total_playouts = 0;
	*total_threads = 0;
	int keep = 0;

	for (int reply = 0; reply < reply_count; reply++) {
		char *r = gtp_replies[reply];
		int id, o, p, t, k;
		if (sscanf(r, "=%d %d %d %d %d", &id, &o, &p, &t, &k) != 5) continue;
		*played += o;
		*total_playouts += p;
		*total_threads += t;
		keep += k;
		// Skip the rest of the first line if any (allow future extensions)
		r = strchr(r, '\n');

		char move[64];
		struct move_stats s;
		while (r && sscanf(++r, "%63s %d %f", move, &s.playouts, &s.value) == 3) {
			coord_t *c = str2coord(move, board_size(b));
			stats_add_result(&stats[*c], s.value, s.playouts);
			if (stats[*c].playouts > best_playouts) {
				best_playouts = stats[*c].playouts;
				best_move = *c;
			}
			coord_done(c);
			r = strchr(r, '\n');
		}
	}
	*keep_looking = keep > reply_count / 2;
	return best_move;
}

/* Set the args for the genmoves command. If stats is not null,
 * append the stats from all slaves above min_playouts, except
 * for pass and resign. args must have CMDS_SIZE bytes and
 * upon return ends with an empty line.
 * Keep this code in sync with uct_genmoves().
 * slave_lock is held on entry and on return. */
static void
genmoves_args(char *args, struct board *b, enum stone color, int played,
	      struct time_info *ti, struct move_stats *stats, int min_playouts)
{
	char *end = args + CMDS_SIZE;
	char *s = args + snprintf(args, CMDS_SIZE, "%s %d", stone2str(color), played);

	if (ti->dim == TD_WALLTIME) {
		s += snprintf(s, end - s, " %.3f %.3f %d %d",
			      ti->len.t.main_time, ti->len.t.byoyomi_time,
			      ti->len.t.byoyomi_periods, ti->len.t.byoyomi_stones);
	}
	s += snprintf(s, end - s, "\n");
	if (stats) {
		foreach_point(b) {
			if (stats[c].playouts <= min_playouts) continue;
			s += snprintf(s, end - s, "%s %d %.7f\n",
				      coord2sstr(c, b),
				      stats[c].playouts, stats[c].value);
		} foreach_point_end;
	}
	s += snprintf(s, end - s, "\n");
}

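/* For instance (made-up values; the exact color and coordinate tokens come
 * from stone2str() and coord2sstr()), the first genmoves of a move under
 * wall-clock time would produce args like
 *   "black 0 25.000 5.000 1 0\n\n"
 * and a later update carrying stats might produce
 *   "black 1600 23.500 5.000 1 0\nQ16 1742 0.4375000\n...\n\n" */
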
/* Time control is mostly done by the slaves, so we use default values here. */
#define FUSEKI_END 20
#define YOSE_START 40

static coord_t *
distributed_genmove(struct engine *e, struct board *b, struct time_info *ti,
		    enum stone color, bool pass_all_alive)
{
	struct distributed *dist = e->data;
	double now = time_now();
	double first = now;

	char *cmd = pass_all_alive ? "pachi-genmoves_cleanup" : "pachi-genmoves";
	char args[CMDS_SIZE];

	coord_t best;
	int played, playouts, threads;

	if (ti->period == TT_NULL) *ti = default_ti;
	struct time_stop stop;
	time_stop_conditions(ti, b, FUSEKI_END, YOSE_START, &stop);
	struct time_info saved_ti = *ti;

	/* Send the first genmoves without stats. */
	genmoves_args(args, b, color, 0, ti, NULL, 0);

	/* Combined move stats from all slaves, only for children
	 * of the root node, plus 2 for pass and resign. */
	struct move_stats *stats = alloca((board_size2(b)+2) * sizeof(struct move_stats));
	stats += 2;

	pthread_mutex_lock(&slave_lock);
	new_cmd(b, cmd, args);

	/* Loop until most slaves want to quit or time elapsed. */
	for (;;) {
		double start = now;
		get_replies(now + STATS_UPDATE_INTERVAL);
		now = time_now();
		if (ti->dim == TD_WALLTIME)
			time_sub(ti, now - start);

		bool keep_looking;
		best = select_best_move(b, stats, &played, &playouts, &threads, &keep_looking);

		if (!keep_looking) break;
		if (ti->dim == TD_WALLTIME) {
			if (now - ti->len.t.timer_start >= stop.worst.time) break;
		} else {
			if (played >= stop.worst.playouts) break;
		}
		if (DEBUGL(2)) {
			char buf[BSIZE];
			char *coord = coord2sstr(best, b);
			snprintf(buf, sizeof(buf),
				 "temp winner is %s %s with score %1.4f (%d/%d games)"
				 " %d slaves %d threads\n",
				 stone2str(color), coord, get_value(stats[best].value, color),
				 stats[best].playouts, playouts, reply_count, threads);
			logline(NULL, "* ", buf);
		}
		/* Send the command with the same gtp id, to avoid discarding
		 * a reply to a previous genmoves at the same move. */
		genmoves_args(args, b, color, played, ti, stats, stats[best].playouts / 100);
		update_cmd(b, cmd, args, false);
	}
	int replies = reply_count;

	/* Do not subtract time spent twice (see gtp_parse). */
	*ti = saved_ti;

	dist->my_last_move.color = color;
	dist->my_last_move.coord = best;
	dist->my_last_stats = stats[best];

	/* Tell the slaves to commit to the selected move, overwriting
	 * the last "pachi-genmoves" in the command history. */
	char *coord = coord2str(best, b);
	snprintf(args, sizeof(args), "%s %s\n", stone2str(color), coord);
	update_cmd(b, "play", args, true);
	pthread_mutex_unlock(&slave_lock);

	if (DEBUGL(1)) {
		char buf[BSIZE];
		double time = now - first + 0.000001; /* avoid divide by zero */
		snprintf(buf, sizeof(buf),
			 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
			 "genmove %d games in %0.2fs %d slaves %d threads (%d games/s,"
			 " %d games/s/slave, %d games/s/thread)\n",
			 stone2str(color), coord, get_value(stats[best].value, color),
			 stats[best].playouts, playouts, played, time, replies, threads,
			 (int)(played/time), (int)(played/time/replies),
			 (int)(played/time/threads));
		logline(NULL, "* ", buf);
	}
	free(coord);
	return coord_copy(best);
}

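/* Answer a "winrate" kgs-chat request with the master's view of its
 * last move; other requests are left unanswered (return NULL). */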
static char *
distributed_chat(struct engine *e, struct board *b, char *cmd)
{
	struct distributed *dist = e->data;
	static char reply[BSIZE];

	cmd += strspn(cmd, " \n\t");
	if (!strncasecmp(cmd, "winrate", 7)) {
		enum stone color = dist->my_last_move.color;
		snprintf(reply, BSIZE, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
			 dist->my_last_stats.playouts, active_slaves, stone2str(color),
			 coord2sstr(dist->my_last_move.coord, b),
			 100 * get_value(dist->my_last_stats.value, color));
		return reply;
	}
	return NULL;
}

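/* strcasecmp() adapter for qsort() over the array of reply strings. */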
static int
scmp(const void *p1, const void *p2)
{
	return strcasecmp(*(char * const *)p1, *(char * const *)p2);
}

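/* Ask all slaves for their dead groups and report the most popular
 * reply in mq. */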
static void
distributed_dead_group_list(struct engine *e, struct board *b, struct move_queue *mq)
{
	pthread_mutex_lock(&slave_lock);

	new_cmd(b, "final_status_list", "dead\n");
	get_replies(time_now() + MAX_FAST_CMD_WAIT);

	/* Find the most popular reply. */
	qsort(gtp_replies, reply_count, sizeof(char *), scmp);
	int best_reply = 0;
	int best_count = 1;
	int count = 1;
	for (int reply = 1; reply < reply_count; reply++) {
		if (!strcmp(gtp_replies[reply], gtp_replies[reply-1])) {
			count++;
		} else {
			count = 1;
		}
		if (count > best_count) {
			best_count = count;
			best_reply = reply;
		}
	}

	/* Pick the first move of each line as group. */
	char *dead = gtp_replies[best_reply];
	dead = strchr(dead, ' '); // skip "id "
	while (dead && *++dead != '\n') {
		coord_t *c = str2coord(dead, board_size(b));
		mq_add(mq, *c);
		coord_done(c);
		dead = strchr(dead, '\n');
	}
	pthread_mutex_unlock(&slave_lock);
}

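/* Parse the engine arguments, allocate the engine state, and start
 * the slave (and optional proxy) listener threads. */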
static struct distributed *
distributed_state_init(char *arg, struct board *b)
{
	struct distributed *dist = calloc2(1, sizeof(struct distributed));

	dist->max_slaves = 100;
	if (arg) {
		char *optspec, *next = arg;
		while (*next) {
			optspec = next;
			next += strcspn(next, ",");
			if (*next) { *next++ = 0; } else { *next = 0; }

			char *optname = optspec;
			char *optval = strchr(optspec, '=');
			if (optval) *optval++ = 0;

			if (!strcasecmp(optname, "slave_port") && optval) {
				dist->slave_port = strdup(optval);
			} else if (!strcasecmp(optname, "proxy_port") && optval) {
				dist->proxy_port = strdup(optval);
			} else if (!strcasecmp(optname, "max_slaves") && optval) {
				dist->max_slaves = atoi(optval);
			} else if (!strcasecmp(optname, "slaves_quit")) {
				dist->slaves_quit = !optval || atoi(optval);
			} else {
				fprintf(stderr, "distributed: Invalid engine argument %s or missing value\n", optname);
			}
		}
	}

	gtp_replies = calloc2(dist->max_slaves, sizeof(char *));

	if (!dist->slave_port) {
		fprintf(stderr, "distributed: missing slave_port\n");
		exit(1);
	}
	int slave_sock = port_listen(dist->slave_port, dist->max_slaves);
	pthread_t thread;
	for (int id = 0; id < dist->max_slaves; id++) {
		pthread_create(&thread, NULL, slave_thread, (void *)(long)slave_sock);
	}

	if (dist->proxy_port) {
		int proxy_sock = port_listen(dist->proxy_port, dist->max_slaves);
		for (int id = 0; id < dist->max_slaves; id++) {
			pthread_create(&thread, NULL, proxy_thread, (void *)(long)proxy_sock);
		}
	}
	return dist;
}

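/* Create the distributed engine. */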
struct engine *
engine_distributed_init(char *arg, struct board *b)
{
	start_time = time_now();
	struct distributed *dist = distributed_state_init(arg, b);
	struct engine *e = calloc2(1, sizeof(struct engine));
	e->name = "Distributed Engine";
	e->comment = "I'm playing the distributed engine. When I'm losing, I will resign; "
		"if I think I win, I play until you pass. "
		"Anyone can send me 'winrate' in private chat to get my assessment of the position.";
	e->notify = distributed_notify;
	e->genmove = distributed_genmove;
	e->dead_group_list = distributed_dead_group_list;
	e->chat = distributed_chat;
	e->data = dist;
	// Keep the threads and the open socket connections:
	e->keep_on_clear = true;

	return e;
}