distributed engine: do not resend history continuously.
[pachi.git] / distributed / distributed.c
/* This is a master for the "distributed" engine. It receives connections
 * from slave machines, sends them gtp commands, then aggregates the
 * results. It can also act as a proxy for the logs of all slave machines.
 * The slave machines must run with engine "uct" (not "distributed").
 * The master sends the pachi-genmoves gtp command to each slave,
 * gets as replies a list of candidate moves, their number of playouts
 * and their value. The master then picks the most popular move. */

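/* Illustrative exchange (ids and numbers are made up; the reply format is
 * the one parsed by select_best_move() below):
 *   master -> slave:  123 pachi-genmoves b
 *   slave  -> master: =123 2000 4
 *                     c4 500 0.5312
 *                     d16 300 0.4719
 *                     <blank line ends the reply>
 * The master sums the per-move playouts over all slaves and plays the move
 * with the highest total. */
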
/* The master trusts the majority of slaves for time control:
 * it picks the move when half the slaves have replied, except
 * when the allowed time has already passed. In this case the
 * master picks among the available replies, or waits for just
 * one reply if there is none yet. */

/* This first version does not send tree updates between slaves,
 * but it has fault tolerance. If a slave is out of sync, the master
 * sends it the whole command history. */

/* Pass me arguments like a=b,c=d,...
 * Supported arguments:
 *   slave_port=SLAVE_PORT  slaves connect to this port; this parameter is mandatory.
 *   max_slaves=MAX_SLAVES  default 100
 *   slaves_quit=0|1        quit gtp command also sent to slaves, default false.
 *   proxy_port=PROXY_PORT  slaves optionally send their logs to this port.
 *     Warning: with proxy_port, the master stderr mixes the logs of all
 *     machines but you can separate them again:
 *       slave logs:  sed -n '/< .*:/s/.*< /< /p' logfile
 *       master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile
 */

/* A configuration without proxy would have one master run on masterhost as:
 *   zzgo -e distributed slave_port=1234
 * and N slaves running as:
 *   zzgo -e uct -g masterhost:1234 slave
 * With log proxy:
 *   zzgo -e distributed slave_port=1234,proxy_port=1235
 *   zzgo -e uct -g masterhost:1234 -l masterhost:1235 slave
 * If the master itself runs on a machine other than that running gogui,
 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
 *   zzgo -e distributed -g 10000 slave_port=1234,proxy_port=1235
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include <ctype.h>
#include <time.h>
#include <math.h>	/* for modf() in get_replies() */
#include <alloca.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define DEBUG

#include "board.h"
#include "engine.h"
#include "move.h"
#include "timeinfo.h"
#include "network.h"
#include "playout.h"
#include "random.h"
#include "stats.h"
#include "mq.h"
#include "debug.h"
#include "distributed/distributed.h"

/* Internal engine state. */
struct distributed {
	char *slave_port;
	char *proxy_port;
	int max_slaves;
	bool slaves_quit;
	struct move my_last_move;
	struct move_stats my_last_stats;
};

#define get_value(value, color) \
	((color) == S_BLACK ? (value) : 1 - (value))

/* Max size for one reply or slave log. */
#define BSIZE 4096

/* Max size of all gtp commands for one game. */
#define CMDS_SIZE (40*MAX_GAMELEN)

/* All gtp commands for current game, separated by \n */
char gtp_cmds[CMDS_SIZE];

/* Latest gtp command sent to slaves. */
char *gtp_cmd = NULL;

/* Number of active slave machines working for this master. */
int active_slaves = 0;

/* Number of replies to last gtp command already received. */
int reply_count = 0;

/* All replies to latest gtp command are in gtp_replies[0..reply_count-1]. */
char **gtp_replies;

/* Mutex protecting gtp_cmds, gtp_cmd, active_slaves, reply_count & gtp_replies. */
pthread_mutex_t slave_lock = PTHREAD_MUTEX_INITIALIZER;

/* Condition signaled when a new gtp command is available. */
static pthread_cond_t cmd_cond = PTHREAD_COND_INITIALIZER;

/* Condition signaled when reply_count increases. */
static pthread_cond_t reply_cond = PTHREAD_COND_INITIALIZER;

/* Mutex protecting stderr. Must not be held at the same time as slave_lock. */
pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Absolute time when this program was started.
 * For debugging only. */
double start_time;

/* Write the time, client address, prefix, and string s to stderr atomically.
 * s should end with a \n */
static void
logline(struct in_addr *client, char *prefix, char *s)
{
	double now = time_now();
	char addr[INET_ADDRSTRLEN];
	if (client) {
		inet_ntop(AF_INET, client, addr, sizeof(addr));
	} else {
		addr[0] = '\0';
	}
	pthread_mutex_lock(&log_lock);
	fprintf(stderr, "%s%15s %9.3f: %s", prefix, addr, now - start_time, s);
	pthread_mutex_unlock(&log_lock);
}

/* Thread opening a connection on the given socket and copying input
 * from there to stderr. */
static void *
proxy_thread(void *arg)
{
	int proxy_sock = (long)arg;
	assert(proxy_sock >= 0);
	for (;;) {
		struct in_addr client;
		int conn = open_server_connection(proxy_sock, &client);
		FILE *f = fdopen(conn, "r");
		char buf[BSIZE];
		while (fgets(buf, BSIZE, f)) {
			logline(&client, "< ", buf);
		}
		fclose(f);
	}
}

/* Main loop of a slave thread.
 * Send the current command to the slave machine and wait for a reply.
 * Resend the whole command history if the slave machine is out of sync.
 * Returns when the connection with the slave machine is cut.
 * slave_lock is held on both entry and exit of this function. */
static void
slave_loop(FILE *f, struct in_addr client, char *buf, bool resend)
{
	char *to_send = gtp_cmd;
	int cmd_id = -1;
	int reply_id = -1;
	for (;;) {
		while (cmd_id == reply_id && !resend) {
			// Wait for a new gtp command.
			pthread_cond_wait(&cmd_cond, &slave_lock);
			if (gtp_cmd)
				cmd_id = atoi(gtp_cmd);
			to_send = gtp_cmd;
		}

		/* Command available; send it to the slave machine.
		 * If the slave was out of sync, send the whole history. */
		assert(to_send && gtp_cmd);
		strncpy(buf, to_send, CMDS_SIZE);
		cmd_id = atoi(gtp_cmd);

		pthread_mutex_unlock(&slave_lock);
		if (DEBUGL(2))
			logline(&client, ">>", buf);
		fputs(buf, f);
		fflush(f);

		/* Read the reply, which always ends with \n\n.
		 * The slave machine sends "=id reply" or "?id reply"
		 * with id == cmd_id if it is in sync. */
		*buf = '\0';
		reply_id = -1;
		char *line = buf;
		while (fgets(line, buf + CMDS_SIZE - line, f) && *line != '\n') {
			if (DEBUGL(2))
				logline(&client, "<<", line);
			if (reply_id < 0 && (*line == '=' || *line == '?') && isdigit(line[1]))
				reply_id = atoi(line+1);
			line += strlen(line);
		}

		pthread_mutex_lock(&slave_lock);
		if (*line != '\n') return;
		if (reply_id == cmd_id && *buf == '=') {
			resend = false;
			gtp_replies[reply_count++] = buf;
			pthread_cond_signal(&reply_cond);
		} else {
			/* The slave was out of sync or had an incorrect board.
			 * Resend the whole command history without waiting.
			 * The slave will send a single reply with the
			 * id of the last command. */
			to_send = gtp_cmds;
			resend = true;
			if (DEBUGL(1))
				logline(&client, "? ", "Resending all history\n");
		}
	}
}

/* Thread sending gtp commands to one slave machine, and
 * reading replies. If a slave machine dies, this thread waits
 * for a connection from another slave. */
static void *
slave_thread(void *arg)
{
	int slave_sock = (long)arg;
	assert(slave_sock >= 0);
	char slave_buf[CMDS_SIZE];
	bool resend = false;

	for (;;) {
		/* Wait for a connection from any slave. */
		struct in_addr client;
		int conn = open_server_connection(slave_sock, &client);

		FILE *f = fdopen(conn, "r+");
		if (DEBUGL(2))
			logline(&client, "= ", "new slave\n");

		/* Minimal check of the slave identity. */
		fputs("name\n", f);
		if (!fgets(slave_buf, sizeof(slave_buf), f)
		    || strncasecmp(slave_buf, "= Pachi", 7)
		    || !fgets(slave_buf, sizeof(slave_buf), f)
		    || strcmp(slave_buf, "\n")) {
			logline(&client, "? ", "bad slave\n");
			fclose(f);
			continue;
		}

		pthread_mutex_lock(&slave_lock);
		active_slaves++;
		slave_loop(f, client, slave_buf, resend);

		assert(active_slaves > 0);
		active_slaves--;
		pthread_mutex_unlock(&slave_lock);

		resend = true;
		if (DEBUGL(2))
			logline(&client, "= ", "lost slave\n");
		fclose(f);
	}
}

/* Create a new gtp command for all slaves. The slave lock is held
 * upon entry and upon return, so the command will actually be
 * sent when the lock is released. The last command is overwritten
 * if gtp_cmd points to a non-empty string. cmd is a single word;
 * args has all arguments and is empty or has a trailing \n */
static void
update_cmd(struct board *b, char *cmd, char *args)
{
	assert(gtp_cmd);
	/* To make sure the slaves are in sync, we ignore the original id
	 * and use the board number plus some random bits as the gtp id.
	 * Make sure the new command has a new id, otherwise slaves
	 * won't send it. */
	static int gtp_id = -1;
	int id;
	int moves = is_reset(cmd) ? 0 : b->moves;
	do {
		/* fast_random() is 16-bit only so the multiplication can't overflow. */
		id = force_reply(moves + fast_random(65535) * DIST_GAMELEN);
	} while (id == gtp_id);
	gtp_id = id;
	snprintf(gtp_cmd, gtp_cmds + CMDS_SIZE - gtp_cmd, "%d %s %s",
		 id, cmd, *args ? args : "\n");
	reply_count = 0;
}

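/* Illustration of the id scheme (numbers made up): at board move 30 the id
 * might come out as force_reply(30 + 7 * DIST_GAMELEN), i.e. the move number
 * in the low part plus a random multiple of DIST_GAMELEN, presumably marked
 * by force_reply() (force_reply()/prevent_reply() are declared elsewhere,
 * distributed/distributed.h presumably) so that slaves know a reply is
 * expected. distributed_notify() below later reduces superseded commands
 * back to just their move number with prevent_reply(). */
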
/* Wait for slave replies until we get at least 50% of the
 * slaves or the given absolute time (if non-zero) has passed.
 * If we get 50% of the slaves, we wait another 0.5s to get
 * as many slaves as possible while not wasting time waiting
 * for stuck or dead slaves.
 * The replies are returned in gtp_replies[0..reply_count-1].
 * slave_lock is held on entry and on return. */
static void
get_replies(double time_limit)
{
#define EXTRA_TIME 0.5
	while (reply_count == 0 || reply_count < active_slaves) {
		if (time_limit && reply_count > 0) {
			struct timespec ts;
			double sec;
			ts.tv_nsec = (int)(modf(time_limit, &sec)*1000000000.0);
			ts.tv_sec = (int)sec;
			pthread_cond_timedwait(&reply_cond, &slave_lock, &ts);
		} else {
			pthread_cond_wait(&reply_cond, &slave_lock);
		}
		if (reply_count == 0) continue;
		if (reply_count >= active_slaves) break;
		double now = time_now();
		if (time_limit && now >= time_limit) break;
		if (reply_count >= active_slaves / 2
		    && (!time_limit || now + EXTRA_TIME < time_limit))
			time_limit = now + EXTRA_TIME;
	}
	assert(reply_count > 0);
}

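/* Illustration of the policy above (numbers made up): with 10 active slaves
 * and an absolute deadline 5 s away, we block until 5 replies have arrived,
 * then pull the deadline in to now + EXTRA_TIME so a few more replies can
 * still be collected without waiting for stuck or dead slaves. */
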
/* Dispatch a new gtp command to all slaves.
 * The slave lock must not be held upon entry and is released upon return.
 * args is empty or ends with '\n' */
static enum parse_code
distributed_notify(struct engine *e, struct board *b, int id, char *cmd, char *args, char **reply)
{
	struct distributed *dist = e->data;

	if ((!strcasecmp(cmd, "quit") && !dist->slaves_quit)
	    || !strcasecmp(cmd, "uct_genbook")
	    || !strcasecmp(cmd, "uct_dumpbook")
	    || !strcasecmp(cmd, "kgs-chat"))
		return P_OK;

	pthread_mutex_lock(&slave_lock);

	// Clear the history when a new game starts:
	if (!gtp_cmd || is_gamestart(cmd)) {
		gtp_cmd = gtp_cmds;
	} else {
		/* Preserve the command history for new slaves.
		 * To indicate that the slave should only reply to
		 * the last command we force the id of previous
		 * commands to be just the move number. */
		int id = prevent_reply(atoi(gtp_cmd));
		int len = strspn(gtp_cmd, "0123456789");
		char buf[32];
		snprintf(buf, sizeof(buf), "%0*d", len, id);
		memcpy(gtp_cmd, buf, len);

		gtp_cmd += strlen(gtp_cmd);
	}

	if (!strcasecmp(cmd, "genmove")) {
		cmd = "pachi-genmoves";
	} else if (!strcasecmp(cmd, "kgs-genmove_cleanup")) {
		cmd = "pachi-genmoves_cleanup";
	} else if (!strcasecmp(cmd, "final_score")) {
		cmd = "final_status_list";
	}

	// Let the slaves send the new gtp command:
	update_cmd(b, cmd, args);
	pthread_cond_broadcast(&cmd_cond);

	/* Wait for replies here except for specific commands
	 * handled by the engine later. If we don't wait, we run
	 * the risk of getting out of sync with most slaves and
	 * sending the complete command history too frequently. */
	if (strcasecmp(cmd, "pachi-genmoves")
	    && strcasecmp(cmd, "pachi-genmoves_cleanup")
	    && strcasecmp(cmd, "final_status_list"))
		get_replies(0);

	pthread_mutex_unlock(&slave_lock);
	return P_OK;
}

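/* Rough sketch of the history kept in gtp_cmds after a few moves (ids are
 * made up; prevent_reply() rewrites each superseded id in place, keeping its
 * digit count, so only the latest command carries a reply-forcing id):
 *   0000000 clear_board
 *   0000001 play b c4
 *   0000002 play w q16
 *   1310003 pachi-genmoves b
 * An out-of-sync slave receives this whole buffer and sends back a single
 * reply with the id of the last command. */
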
/* pachi-genmoves returns a line "=id total_playouts threads[ reserved]",
 * then a list of lines "coord playouts value". Keep this function in sync
 * with uct_notify(). Return the move with the most playouts, its average
 * value, and stats for debugging.
 * slave_lock is held on entry and on return. */
static coord_t
select_best_move(struct board *b, struct move_stats *best_stats,
		 int *total_playouts, int *total_threads)
{
	assert(reply_count > 0);

	/* +2 for pass and resign. */
	struct move_stats *stats = alloca((board_size2(b)+2) * sizeof(struct move_stats));
	memset(stats, 0, (board_size2(b)+2) * sizeof(*stats));
	stats += 2;

	coord_t best_move = pass;
	int best_playouts = -1;
	*total_playouts = *total_threads = 0;

	for (int reply = 0; reply < reply_count; reply++) {
		char *r = gtp_replies[reply];
		int id, playouts, threads;
		if (sscanf(r, "=%d %d %d", &id, &playouts, &threads) != 3) continue;
		*total_playouts += playouts;
		*total_threads += threads;
		// Skip the rest of the first line if any (allows future extensions).
		r = strchr(r, '\n');

		char move[64];
		struct move_stats s;
		while (r && sscanf(++r, "%63s %d %f", move, &s.playouts, &s.value) == 3) {
			coord_t *c = str2coord(move, board_size(b));
			stats_add_result(&stats[*c], s.value, s.playouts);
			if (stats[*c].playouts > best_playouts) {
				best_playouts = stats[*c].playouts;
				best_move = *c;
			}
			coord_done(c);
			r = strchr(r, '\n');
		}
	}
	*best_stats = stats[best_move];
	return best_move;
}

/* Time control is mostly done by the slaves, so we use default values here. */
#define FUSEKI_END 20
#define YOSE_START 40

static coord_t *
distributed_genmove(struct engine *e, struct board *b, struct time_info *ti, enum stone color, bool pass_all_alive)
{
	struct distributed *dist = e->data;
	double start = time_now();

	/* If we do not have time constraints, we just wait for
	 * the slaves to reply as they have been configured by default. */
	double time_limit = 0;
	if (ti->period != TT_NULL && ti->dim == TD_WALLTIME) {
		struct time_stop stop;
		time_stop_conditions(ti, b, FUSEKI_END, YOSE_START, &stop);
		time_limit = ti->len.t.timer_start + stop.worst.time;
	}

	pthread_mutex_lock(&slave_lock);
	get_replies(time_limit);
	int replies = reply_count;

	int playouts, threads;
	dist->my_last_move.color = color;
	dist->my_last_move.coord = select_best_move(b, &dist->my_last_stats, &playouts, &threads);

	/* Tell the slaves to commit to the selected move, overwriting
	 * the last "pachi-genmoves" in the command history. */
	char args[64];
	char *coord = coord2str(dist->my_last_move.coord, b);
	snprintf(args, sizeof(args), "%s %s\n", stone2str(color), coord);
	update_cmd(b, "play", args);
	pthread_cond_broadcast(&cmd_cond);
	pthread_mutex_unlock(&slave_lock);

	if (DEBUGL(1)) {
		char buf[BSIZE];
		enum stone color = dist->my_last_move.color;
		double time = time_now() - start + 0.000001; /* avoid divide by zero */
		snprintf(buf, sizeof(buf),
			 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
			 "genmove in %0.2fs (%d games/s, %d games/s/slave, %d games/s/thread)\n",
			 stone2str(color), coord, get_value(dist->my_last_stats.value, color),
			 dist->my_last_stats.playouts, playouts, time,
			 (int)(playouts/time), (int)(playouts/time/replies),
			 (int)(playouts/time/threads));
		logline(NULL, "*** ", buf);
	}
	free(coord);
	return coord_copy(dist->my_last_move.coord);
}

static char *
distributed_chat(struct engine *e, struct board *b, char *cmd)
{
	struct distributed *dist = e->data;
	static char reply[BSIZE];

	cmd += strspn(cmd, " \n\t");
	if (!strncasecmp(cmd, "winrate", 7)) {
		enum stone color = dist->my_last_move.color;
		snprintf(reply, BSIZE, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
			 dist->my_last_stats.playouts, active_slaves, stone2str(color),
			 coord2sstr(dist->my_last_move.coord, b),
			 100 * get_value(dist->my_last_stats.value, color));
		return reply;
	}
	return NULL;
}

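/* A "winrate" chat query would then produce a line along the lines of
 * (numbers made up; the exact coordinate formatting comes from coord2sstr()):
 *   "In 52000 playouts at 8 machines, white d16 can win with 53.20% probability." */
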
static int
scmp(const void *p1, const void *p2)
{
	return strcasecmp(*(char * const *)p1, *(char * const *)p2);
}

static void
distributed_dead_group_list(struct engine *e, struct board *b, struct move_queue *mq)
{
	pthread_mutex_lock(&slave_lock);
	get_replies(0);

	/* Find the most popular reply. */
	qsort(gtp_replies, reply_count, sizeof(char *), scmp);
	int best_reply = 0;
	int best_count = 1;
	int count = 1;
	for (int reply = 1; reply < reply_count; reply++) {
		if (!strcmp(gtp_replies[reply], gtp_replies[reply-1])) {
			count++;
		} else {
			count = 1;
		}
		if (count > best_count) {
			best_count = count;
			best_reply = reply;
		}
	}

	/* Pick the first move of each line as group. */
	char *dead = gtp_replies[best_reply];
	dead = strchr(dead, ' '); // skip "id "
	while (dead && *++dead != '\n') {
		coord_t *c = str2coord(dead, board_size(b));
		mq_add(mq, *c);
		coord_done(c);
		dead = strchr(dead, '\n');
	}
	pthread_mutex_unlock(&slave_lock);
}

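/* Example of the kind of reply parsed above (made-up coordinates; one dead
 * group per line, only the first coordinate of each line is queued):
 *   =1310005 c3 c4 d4
 *   q16 q17
 *   <blank line ends the reply>
 * mq would then receive c3 and q16. */
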
static struct distributed *
distributed_state_init(char *arg, struct board *b)
{
	struct distributed *dist = calloc(1, sizeof(struct distributed));

	dist->max_slaves = 100;
	if (arg) {
		char *optspec, *next = arg;
		while (*next) {
			optspec = next;
			next += strcspn(next, ",");
			if (*next) { *next++ = 0; } else { *next = 0; }

			char *optname = optspec;
			char *optval = strchr(optspec, '=');
			if (optval) *optval++ = 0;

			if (!strcasecmp(optname, "slave_port") && optval) {
				dist->slave_port = strdup(optval);
			} else if (!strcasecmp(optname, "proxy_port") && optval) {
				dist->proxy_port = strdup(optval);
			} else if (!strcasecmp(optname, "max_slaves") && optval) {
				dist->max_slaves = atoi(optval);
			} else if (!strcasecmp(optname, "slaves_quit")) {
				dist->slaves_quit = !optval || atoi(optval);
			} else {
				fprintf(stderr, "distributed: Invalid engine argument %s or missing value\n", optname);
			}
		}
	}

	gtp_replies = calloc(dist->max_slaves, sizeof(char *));

	if (!dist->slave_port) {
		fprintf(stderr, "distributed: missing slave_port\n");
		exit(1);
	}
	int slave_sock = port_listen(dist->slave_port, dist->max_slaves);
	pthread_t thread;
	for (int id = 0; id < dist->max_slaves; id++) {
		pthread_create(&thread, NULL, slave_thread, (void *)(long)slave_sock);
	}

	if (dist->proxy_port) {
		int proxy_sock = port_listen(dist->proxy_port, dist->max_slaves);
		for (int id = 0; id < dist->max_slaves; id++) {
			pthread_create(&thread, NULL, proxy_thread, (void *)(long)proxy_sock);
		}
	}
	return dist;
}

struct engine *
engine_distributed_init(char *arg, struct board *b)
{
	start_time = time_now();
	struct distributed *dist = distributed_state_init(arg, b);
	struct engine *e = calloc(1, sizeof(struct engine));
	e->name = "Distributed Engine";
	e->comment = "I'm playing the distributed engine. When I'm losing, I will resign; "
		"if I think I win, I play until you pass. "
		"Anyone can send me 'winrate' in private chat to get my assessment of the position.";
	e->notify = distributed_notify;
	e->genmove = distributed_genmove;
	e->dead_group_list = distributed_dead_group_list;
	e->chat = distributed_chat;
	e->data = dist;
	// Keep the threads and the open socket connections:
	e->keep_on_clear = true;

	return e;
}