distributed.c: make global variables static.
distributed/distributed.c

/* This is a master for the "distributed" engine. It receives connections
 * from slave machines, sends them gtp commands, then aggregates the
 * results. It can also act as a proxy for the logs of all slave machines.
 * The slave machines must run with engine "uct" (not "distributed").
 * The master sends the pachi-genmoves gtp command to each slave,
 * gets as replies a list of candidate moves, their number of playouts
 * and their value. The master then picks the most popular move. */

/* With time control, the master waits for all slaves, except when
 * the allowed time has already passed. In that case the master picks
 * among the available replies, or waits for just one reply if there
 * is none yet.
 * Without time control, the master waits until the desired number
 * of games has been simulated. In this case the -t parameter for the
 * master should be the sum of the parameters for all slaves. */
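
/* (Illustrative numbers only: with 8 slaves each started with -t =10000,
 * the master would be started with -t =80000, so that it keeps waiting
 * until roughly 80000 playouts have been reported in total.) */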

/* To minimize the number of replies that are ignored because they
 * arrive too late, slaves send temporary replies to the genmoves
 * command with the best moves so far. So when the master has to
 * choose, it should have final replies from most slaves and at
 * least temporary replies from all of them. */

/* This first version does not send tree updates between slaves,
 * but it has fault tolerance. If a slave is out of sync, the master
 * sends it the appropriate command history. */

/* Pass me arguments like a=b,c=d,...
 * Supported arguments:
 * slave_port=SLAVE_PORT  slaves connect to this port; this parameter is mandatory.
 * max_slaves=MAX_SLAVES  default 100
 * slaves_quit=0|1        quit gtp command also sent to slaves, default false.
 * proxy_port=PROXY_PORT  slaves optionally send their logs to this port.
 *    Warning: with proxy_port, the master stderr mixes the logs of all
 *    machines, but you can separate them again:
 *      slave logs:  sed -n '/< .*:/s/.*< /< /p' logfile
 *      master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile
 */

/* A configuration without proxy would have one master run on masterhost as:
 *    zzgo -e distributed slave_port=1234
 * and N slaves running as:
 *    zzgo -e uct -g masterhost:1234 slave
 * With log proxy:
 *    zzgo -e distributed slave_port=1234,proxy_port=1235
 *    zzgo -e uct -g masterhost:1234 -l masterhost:1235 slave
 * If the master itself runs on a machine other than the one running gogui,
 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
 *    zzgo -e distributed -g 10000 slave_port=1234,proxy_port=1235
 */
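
/* (As a quick sanity check of such a setup: with debug level 2 the master
 * logs a "new slave" line for every slave connection it accepts, and a
 * "winrate" message in private chat returns the aggregated estimate.) */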

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include <ctype.h>
#include <time.h>
#include <math.h>	/* modf(), used in get_replies() */
#include <alloca.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define DEBUG

#include "board.h"
#include "engine.h"
#include "move.h"
#include "timeinfo.h"
#include "network.h"
#include "playout.h"
#include "random.h"
#include "stats.h"
#include "mq.h"
#include "debug.h"
#include "distributed/distributed.h"

/* Internal engine state. */
struct distributed {
	char *slave_port;
	char *proxy_port;
	int max_slaves;
	bool slaves_quit;
	struct move my_last_move;
	struct move_stats my_last_stats;
};

static coord_t select_best_move(struct board *b, struct move_stats *best_stats,
				int *total_playouts, int *total_threads);

/* Default number of simulations to perform per move.
 * Note that this is in total over all slaves! */
#define DIST_GAMES 80000
static const struct time_info default_ti = {
	.period = TT_MOVE,
	.dim = TD_GAMES,
	.len = { .games = DIST_GAMES },
};

#define get_value(value, color) \
	((color) == S_BLACK ? (value) : 1 - (value))
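
/* The macro assumes the stored value is from Black's point of view, e.g.
 * get_value(0.70, S_WHITE) == 0.30. */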

/* Max size for one reply or slave log. */
#define BSIZE 4096

/* Max size of all gtp commands for one game. */
#define CMDS_SIZE (40*MAX_GAMELEN)

/* All gtp commands for the current game, separated by \n. */
static char gtp_cmds[CMDS_SIZE];

/* Latest gtp command sent to slaves. */
static char *gtp_cmd = NULL;

/* Remember at most 3 gtp ids per move (time_left, genmoves, play).
 * For move 0 there can be more than 3 commands
 * but then we resend the whole history. */
#define MAX_CMDS_PER_MOVE 3

/* History of gtp commands sent for the current game, indexed by move. */
static int id_history[MAX_GAMELEN][MAX_CMDS_PER_MOVE];
static char *cmd_history[MAX_GAMELEN][MAX_CMDS_PER_MOVE];

/* Number of active slave machines working for this master. */
static int active_slaves = 0;

/* Number of replies to the last gtp command already received. */
static int reply_count = 0;
static int final_reply_count = 0;

/* All replies to the latest gtp command are in gtp_replies[0..reply_count-1]. */
static char **gtp_replies;

/* Mutex protecting gtp_cmds, gtp_cmd, id_history, cmd_history,
 * active_slaves, reply_count, final_reply_count & gtp_replies. */
static pthread_mutex_t slave_lock = PTHREAD_MUTEX_INITIALIZER;

/* Condition signaled when a new gtp command is available. */
static pthread_cond_t cmd_cond = PTHREAD_COND_INITIALIZER;

/* Condition signaled when reply_count increases. */
static pthread_cond_t reply_cond = PTHREAD_COND_INITIALIZER;

/* Mutex protecting stderr. Must not be held at the same time as slave_lock. */
static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Absolute time when this program was started.
 * For debugging only. */
static double start_time;

/* Write the time, client address, prefix, and string s to stderr atomically.
 * s should end with a \n */
static void
logline(struct in_addr *client, char *prefix, char *s)
{
	double now = time_now();
	char addr[INET_ADDRSTRLEN];
	if (client) {
		inet_ntop(AF_INET, client, addr, sizeof(addr));
	} else {
		addr[0] = '\0';
	}
	pthread_mutex_lock(&log_lock);
	fprintf(stderr, "%s%15s %9.3f: %s", prefix, addr, now - start_time, s);
	pthread_mutex_unlock(&log_lock);
}

/* Thread opening a connection on the given socket and copying input
 * from there to stderr. */
static void *
proxy_thread(void *arg)
{
	int proxy_sock = (long)arg;
	assert(proxy_sock >= 0);
	for (;;) {
		struct in_addr client;
		int conn = open_server_connection(proxy_sock, &client);
		FILE *f = fdopen(conn, "r");
		char buf[BSIZE];
		while (fgets(buf, BSIZE, f)) {
			logline(&client, "< ", buf);
		}
		fclose(f);
	}
}

/* Get a reply to one gtp command. If we get a temporary
 * reply, put it in gtp_replies[reply_slot], notify the main
 * thread, and continue reading until we get a final reply.
 * Return the gtp command id, or -1 on error.
 * slave_buf and reply must have at least CMDS_SIZE bytes.
 * slave_lock is not held on either entry or exit of this function. */
static int
get_reply(FILE *f, struct in_addr client, char *slave_buf, char *reply, int *reply_slot)
{
	int reply_id = -1;
	*reply_slot = -1;
	*reply = '\0';
	char *line = reply;
	while (fgets(line, reply + CMDS_SIZE - line, f) && *line != '\n') {
		if (DEBUGL(2))
			logline(&client, "<<", line);
		if (reply_id < 0 && (*line == '=' || *line == '?') && isdigit(line[1]))
			reply_id = atoi(line+1);
		if (*line == '#') {
			/* Temporary reply. */
			line = reply;
			pthread_mutex_lock(&slave_lock);
			if (reply_id != atoi(gtp_cmd)) {
				pthread_mutex_unlock(&slave_lock);
				continue; // read and discard the rest
			}
			strncpy(slave_buf, reply, CMDS_SIZE);
			if (*reply_slot < 0)
				*reply_slot = reply_count++;
			gtp_replies[*reply_slot] = slave_buf;
			pthread_cond_signal(&reply_cond);
			pthread_mutex_unlock(&slave_lock);
		} else {
			line += strlen(line);
		}
	}
	if (*line != '\n') return -1;
	return reply_id;
}
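
/* As implied by the parsing above, a slave's reply stream for one genmoves
 * command might look like this (coordinates and counts made up):
 *
 *   =123456 3000 8        <- first snapshot: "=id total_playouts threads"
 *   q16 1800 0.478
 *   d4 1100 0.465
 *   #                     <- '#' ends a temporary snapshot; it is published
 *   =123456 7412 8           to gtp_replies and accumulation restarts
 *   q16 4102 0.481
 *   d4 3310 0.462
 *                         <- empty line ends the final reply
 */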

/* Main loop of a slave thread.
 * Send the current command to the slave machine and wait for a reply.
 * Resend the command history if the slave machine is out of sync.
 * Returns when the connection with the slave machine is cut.
 * slave_lock is held on both entry and exit of this function. */
static void
slave_loop(FILE *f, struct in_addr client, char *buf, bool resend)
{
	char *to_send = gtp_cmd;
	int cmd_id = -1;
	int reply_id = -1;
	int reply_slot;
	for (;;) {
		while (cmd_id == reply_id && !resend) {
			// Wait for a new gtp command.
			pthread_cond_wait(&cmd_cond, &slave_lock);
			if (gtp_cmd)
				cmd_id = atoi(gtp_cmd);
			to_send = gtp_cmd;
		}

		/* Command available, send it to the slave machine.
		 * If the slave was out of sync, send the history. */
		assert(to_send && gtp_cmd);
		strncpy(buf, to_send, CMDS_SIZE);
		cmd_id = atoi(gtp_cmd);

		pthread_mutex_unlock(&slave_lock);

		if (DEBUGL(1) && resend) {
			if (to_send == gtp_cmds) {
				logline(&client, "? ", "Slave out-of-sync, resending all history\n");
			} else {
				logline(&client, "? ", "Slave behind, partial resend\n");
			}
		}
		if (DEBUGL(2))
			logline(&client, ">>", buf);
		fputs(buf, f);
		fflush(f);

		/* Read the reply, which always ends with \n\n.
		 * The slave machine sends "=id reply" or "?id reply"
		 * with id == cmd_id if it is in sync. */
		char reply[CMDS_SIZE];
		reply_id = get_reply(f, client, buf, reply, &reply_slot);

		pthread_mutex_lock(&slave_lock);
		if (reply_id == -1) return;

		// Make sure we are still in sync:
		cmd_id = atoi(gtp_cmd);
		if (reply_id == cmd_id && *reply == '=') {
			resend = false;
			strncpy(buf, reply, CMDS_SIZE);
			final_reply_count++;
			if (reply_slot < 0)
				reply_slot = reply_count++;
			gtp_replies[reply_slot] = buf;
			pthread_cond_signal(&reply_cond);
			continue;
		}
		resend = true;
		to_send = gtp_cmds;
		/* Resend everything if the slave got the latest command
		 * but doesn't have a correct board. */
		if (reply_id == cmd_id) continue;

		/* The slave is out-of-sync. Check whether the last command
		 * it received belongs to the current game. If so, resend
		 * starting at the last move known by the slave, otherwise
		 * resend the whole history. */
		int reply_move = move_number(reply_id);
		if (reply_move > move_number(cmd_id)) continue;

		for (int slot = 0; slot < MAX_CMDS_PER_MOVE; slot++) {
			if (reply_id == id_history[reply_move][slot]) {
				to_send = cmd_history[reply_move][slot];
				break;
			}
		}
	}
}
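
/* Illustrative resync (move numbers made up): if the current command is for
 * move 78 but the slave's last reply id maps to move 75 of the same game,
 * the loop above finds that id in id_history[75] and resends starting from
 * cmd_history[75], instead of replaying the whole game history. */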

/* Thread sending gtp commands to one slave machine, and
 * reading replies. If a slave machine dies, this thread waits
 * for a connection from another slave. */
static void *
slave_thread(void *arg)
{
	int slave_sock = (long)arg;
	assert(slave_sock >= 0);
	char slave_buf[CMDS_SIZE];
	bool resend = false;

	for (;;) {
		/* Wait for a connection from any slave. */
		struct in_addr client;
		int conn = open_server_connection(slave_sock, &client);

		FILE *f = fdopen(conn, "r+");
		if (DEBUGL(2))
			logline(&client, "= ", "new slave\n");

		/* Minimal check of the slave identity. */
		fputs("name\n", f);
		if (!fgets(slave_buf, sizeof(slave_buf), f)
		    || strncasecmp(slave_buf, "= Pachi", 7)
		    || !fgets(slave_buf, sizeof(slave_buf), f)
		    || strcmp(slave_buf, "\n")) {
			logline(&client, "? ", "bad slave\n");
			fclose(f);
			continue;
		}

		pthread_mutex_lock(&slave_lock);
		active_slaves++;
		slave_loop(f, client, slave_buf, resend);

		assert(active_slaves > 0);
		active_slaves--;
		pthread_mutex_unlock(&slave_lock);

		resend = true;
		if (DEBUGL(2))
			logline(&client, "= ", "lost slave\n");
		fclose(f);
	}
}

/* Create a new gtp command for all slaves. The slave lock is held
 * upon entry and upon return, so the command will actually be
 * sent when the lock is released. The last command is overwritten
 * if gtp_cmd points to a non-empty string. cmd is a single word;
 * args has all arguments and is empty or has a trailing \n */
static void
update_cmd(struct board *b, char *cmd, char *args)
{
	assert(gtp_cmd);
	/* To make sure the slaves are in sync, we ignore the original id
	 * and use the board number plus some random bits as gtp id.
	 * Make sure the new command has a new id otherwise slaves
	 * won't send it. */
	static int gtp_id = -1;
	int id;
	int moves = is_reset(cmd) ? 0 : b->moves;
	do {
		/* fast_random() is 16-bit only so the multiplication can't overflow. */
		id = force_reply(moves + fast_random(65535) * DIST_GAMELEN);
	} while (id == gtp_id);
	gtp_id = id;
	snprintf(gtp_cmd, gtp_cmds + CMDS_SIZE - gtp_cmd, "%d %s %s",
		 id, cmd, *args ? args : "\n");
	reply_count = final_reply_count = 0;

	/* Remember history for out-of-sync slaves, at most 3 ids per move
	 * (time_left, genmoves, play). */
	static int slot = 0;
	slot = (slot + 1) % MAX_CMDS_PER_MOVE;
	id_history[moves][slot] = id;
	cmd_history[moves][slot] = gtp_cmd;

	// Notify the slave threads about the new command.
	pthread_cond_broadcast(&cmd_cond);
}
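
/* Example of the id scheme (random value made up): for board move 37 and
 * fast_random() returning 5, the command id is force_reply(37 + 5*DIST_GAMELEN).
 * Since id_history[] is indexed by move number, this relies on move_number()
 * in distributed/distributed.h recovering 37 from such an id, which is what
 * slave_loop() assumes when it looks the id up again. */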

/* Update the command history, then create a new gtp command
 * for all slaves. The slave lock is held upon entry and
 * upon return, so the command will actually be sent when the
 * lock is released. cmd is a single word; args has all
 * arguments and is empty or has a trailing \n */
static void
new_cmd(struct board *b, char *cmd, char *args)
{
	// Clear the history when a new game starts:
	if (!gtp_cmd || is_gamestart(cmd)) {
		gtp_cmd = gtp_cmds;
	} else {
		/* Preserve command history for new slaves.
		 * To indicate that the slave should only reply to
		 * the last command we force the id of previous
		 * commands to be just the move number. */
		int id = prevent_reply(atoi(gtp_cmd));
		int len = strspn(gtp_cmd, "0123456789");
		char buf[32];
		snprintf(buf, sizeof(buf), "%0*d", len, id);
		memcpy(gtp_cmd, buf, len);

		gtp_cmd += strlen(gtp_cmd);
	}

	// Let the slave threads send the new gtp command:
	update_cmd(b, cmd, args);
}
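
/* Sketch of the shared command buffer after a few moves (ids and arguments
 * abbreviated):
 *
 *   "ID1 play b q16\nID2 play w d4\nID3 pachi-genmoves b ...\n"
 *
 * ID1 and ID2 have been rewritten in place (same digit width) to
 * non-forcing ids by the code above, so a freshly connected slave replays
 * them to rebuild the board but only answers ID3. */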

/* If time_limit > 0, wait until all slaves have replied, or if the
 * given absolute time is passed, wait for at least one reply.
 * If time_limit == 0, wait until we get at least min_playouts games
 * simulated in total by all the slaves, or until all slaves have replied.
 * The replies are returned in gtp_replies[0..reply_count-1].
 * slave_lock is held on entry and on return. */
static void
get_replies(double time_limit, int min_playouts, struct board *b)
{
	while (reply_count == 0 || final_reply_count < active_slaves) {
		if (time_limit && reply_count > 0) {
			struct timespec ts;
			double sec;
			ts.tv_nsec = (int)(modf(time_limit, &sec)*1000000000.0);
			ts.tv_sec = (int)sec;
			pthread_cond_timedwait(&reply_cond, &slave_lock, &ts);
		} else {
			pthread_cond_wait(&reply_cond, &slave_lock);
		}
		if (reply_count == 0) continue;
		if (final_reply_count >= active_slaves) return;
		if (time_limit) {
			if (time_now() >= time_limit) break;
		} else {
			int playouts, threads;
			struct move_stats s;
			select_best_move(b, &s, &playouts, &threads);
			if (playouts >= min_playouts) return;
		}
	}
	if (DEBUGL(1)) {
		char buf[1024];
		snprintf(buf, sizeof(buf),
			 "get_replies timeout %.3f >= %.3f, final %d, temp %d, active %d\n",
			 time_now() - start_time, time_limit - start_time,
			 final_reply_count, reply_count, active_slaves);
		logline(NULL, "? ", buf);
	}
	assert(reply_count > 0 && final_reply_count <= reply_count);
}
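
/* Note on the timed wait above: pthread_cond_timedwait() expects an absolute
 * timestamp, so callers pass an absolute time_limit (e.g. time_now() +
 * MAX_FAST_CMD_WAIT below), assuming time_now() is wall-clock based; the
 * same value can then be compared directly against time_now(). */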

/* Maximum time (seconds) to wait for answers to fast gtp commands
 * (all commands except pachi-genmoves and final_status_list). */
#define MAX_FAST_CMD_WAIT 1.0

/* Dispatch a new gtp command to all slaves.
 * The slave lock must not be held upon entry and is released upon return.
 * args is empty or ends with '\n' */
static enum parse_code
distributed_notify(struct engine *e, struct board *b, int id, char *cmd, char *args, char **reply)
{
	struct distributed *dist = e->data;

	/* Commands that should not be sent to slaves */
	if ((!strcasecmp(cmd, "quit") && !dist->slaves_quit)
	    || !strcasecmp(cmd, "uct_genbook")
	    || !strcasecmp(cmd, "uct_dumpbook")
	    || !strcasecmp(cmd, "kgs-chat")

	    /* and commands that will be sent to slaves later */
	    || !strcasecmp(cmd, "genmove")
	    || !strcasecmp(cmd, "kgs-genmove_cleanup")
	    || !strcasecmp(cmd, "final_score")
	    || !strcasecmp(cmd, "final_status_list"))
		return P_OK;

	pthread_mutex_lock(&slave_lock);

	// Create a new command to be sent by the slave threads.
	new_cmd(b, cmd, args);

	/* Wait for replies here. If we don't wait, we run the
	 * risk of getting out of sync with most slaves and
	 * sending command history too frequently. */
	get_replies(time_now() + MAX_FAST_CMD_WAIT, 0, b);

	pthread_mutex_unlock(&slave_lock);
	return P_OK;
}

/* pachi-genmoves returns a line "=id total_playouts threads[ reserved]"
 * then a list of lines "coord playouts value".
 * Keep this function in sync with uct_notify().
 * Return the move with the most playouts, its average value, and stats for debugging.
 * slave_lock is held on entry and on return. */
static coord_t
select_best_move(struct board *b, struct move_stats *best_stats,
		 int *total_playouts, int *total_threads)
{
	assert(reply_count > 0);

	/* +2 for pass and resign. */
	struct move_stats *stats = alloca((board_size2(b)+2) * sizeof(struct move_stats));
	memset(stats, 0, (board_size2(b)+2) * sizeof(*stats));
	stats += 2;

	coord_t best_move = pass;
	int best_playouts = -1;
	*total_playouts = *total_threads = 0;

	for (int reply = 0; reply < reply_count; reply++) {
		char *r = gtp_replies[reply];
		int id, playouts, threads;
		if (sscanf(r, "=%d %d %d", &id, &playouts, &threads) != 3) continue;
		*total_playouts += playouts;
		*total_threads += threads;
		// Skip the rest of the first line if any (allow future extensions).
		r = strchr(r, '\n');

		char move[64];
		struct move_stats s;
		while (r && sscanf(++r, "%63s %d %f", move, &s.playouts, &s.value) == 3) {
			coord_t *c = str2coord(move, board_size(b));
			stats_add_result(&stats[*c], s.value, s.playouts);
			if (stats[*c].playouts > best_playouts) {
				best_playouts = stats[*c].playouts;
				best_move = *c;
			}
			coord_done(c);
			r = strchr(r, '\n');
		}
	}
	*best_stats = stats[best_move];
	return best_move;
}
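
/* Aggregation example (counts made up): if two slaves report q16 with
 * 1200 playouts at value 0.52 and 800 playouts at value 0.40, and
 * stats_add_result() keeps a playout-weighted running mean as in stats.h,
 * stats[q16] ends up with 2000 playouts at value 0.472; q16 is then chosen
 * unless some other coordinate accumulates more than 2000 playouts. */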

/* Time control is mostly done by the slaves, so we use default values here. */
#define FUSEKI_END 20
#define YOSE_START 40

static coord_t *
distributed_genmove(struct engine *e, struct board *b, struct time_info *ti, enum stone color, bool pass_all_alive)
{
	struct distributed *dist = e->data;
	double start = time_now();

	long time_limit = 0;
	int min_playouts = 0;

	char *cmd = pass_all_alive ? "pachi-genmoves_cleanup" : "pachi-genmoves";
	char args[128];

	if (ti->period == TT_NULL) *ti = default_ti;
	struct time_stop stop;
	time_stop_conditions(ti, b, FUSEKI_END, YOSE_START, &stop);

	if (ti->dim == TD_WALLTIME) {
		time_limit = ti->len.t.timer_start + stop.worst.time;

		/* Send time info to the slaves to make sure they all
		 * reply in time, particularly if they were out of sync
		 * and there are no time_left commands. We cannot send
		 * the absolute time limit because slaves may have a
		 * different system time.
		 * Keep this code in sync with gtp_parse(). */
		snprintf(args, sizeof(args), "%s %.3f %.3f %d %d\n",
			 stone2str(color), ti->len.t.main_time,
			 ti->len.t.byoyomi_time, ti->len.t.byoyomi_periods,
			 ti->len.t.byoyomi_stones);
	} else {
		min_playouts = stop.desired.playouts;

		/* For absolute number of simulations, slaves still
		 * use their own -t =NUM parameter. (The master
		 * needs to know the total number of simulations over
		 * all slaves so it has a different -t parameter.) */
		snprintf(args, sizeof(args), "%s\n", stone2str(color));
	}

	pthread_mutex_lock(&slave_lock);
	new_cmd(b, cmd, args);

	get_replies(time_limit, min_playouts, b);
	int replies = reply_count;

	int playouts, threads;
	dist->my_last_move.color = color;
	dist->my_last_move.coord = select_best_move(b, &dist->my_last_stats, &playouts, &threads);

	/* Tell the slaves to commit to the selected move, overwriting
	 * the last "pachi-genmoves" in the command history. */
	char *coord = coord2str(dist->my_last_move.coord, b);
	snprintf(args, sizeof(args), "%s %s\n", stone2str(color), coord);
	update_cmd(b, "play", args);
	pthread_mutex_unlock(&slave_lock);

	if (DEBUGL(1)) {
		char buf[BSIZE];
		enum stone color = dist->my_last_move.color;
		double time = time_now() - start + 0.000001; /* avoid divide by zero */
		snprintf(buf, sizeof(buf),
			 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
			 "genmove in %0.2fs %d slaves %d threads (%d games/s,"
			 " %d games/s/slave, %d games/s/thread)\n",
			 stone2str(color), coord, get_value(dist->my_last_stats.value, color),
			 dist->my_last_stats.playouts, playouts, time, replies, threads,
			 (int)(playouts/time), (int)(playouts/time/replies),
			 (int)(playouts/time/threads));
		logline(NULL, "* ", buf);
	}
	free(coord);
	return coord_copy(dist->my_last_move.coord);
}

static char *
distributed_chat(struct engine *e, struct board *b, char *cmd)
{
	struct distributed *dist = e->data;
	static char reply[BSIZE];

	cmd += strspn(cmd, " \n\t");
	if (!strncasecmp(cmd, "winrate", 7)) {
		enum stone color = dist->my_last_move.color;
		snprintf(reply, BSIZE, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
			 dist->my_last_stats.playouts, active_slaves, stone2str(color),
			 coord2sstr(dist->my_last_move.coord, b),
			 100 * get_value(dist->my_last_stats.value, color));
		return reply;
	}
	return NULL;
}

static int
scmp(const void *p1, const void *p2)
{
	return strcasecmp(*(char * const *)p1, *(char * const *)p2);
}

static void
distributed_dead_group_list(struct engine *e, struct board *b, struct move_queue *mq)
{
	pthread_mutex_lock(&slave_lock);

	new_cmd(b, "final_status_list", "dead\n");
	get_replies(time_now() + MAX_FAST_CMD_WAIT, 0, b);

	/* Find the most popular reply. */
	qsort(gtp_replies, reply_count, sizeof(char *), scmp);
	int best_reply = 0;
	int best_count = 1;
	int count = 1;
	for (int reply = 1; reply < reply_count; reply++) {
		if (!strcmp(gtp_replies[reply], gtp_replies[reply-1])) {
			count++;
		} else {
			count = 1;
		}
		if (count > best_count) {
			best_count = count;
			best_reply = reply;
		}
	}

	/* Pick the first move of each line as group. */
	char *dead = gtp_replies[best_reply];
	dead = strchr(dead, ' '); // skip "id "
	while (dead && *++dead != '\n') {
		coord_t *c = str2coord(dead, board_size(b));
		mq_add(mq, *c);
		coord_done(c);
		dead = strchr(dead, '\n');
	}
	pthread_mutex_unlock(&slave_lock);
}
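
/* Illustrative vote (replies made up): if five slaves answer and three of
 * them return the byte-identical dead-group list, the sort above puts those
 * three next to each other, that list wins with count 3, and the first
 * coordinate of each of its lines is added to the move queue. */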

static struct distributed *
distributed_state_init(char *arg, struct board *b)
{
	struct distributed *dist = calloc(1, sizeof(struct distributed));

	dist->max_slaves = 100;
	if (arg) {
		char *optspec, *next = arg;
		while (*next) {
			optspec = next;
			next += strcspn(next, ",");
			if (*next) { *next++ = 0; } else { *next = 0; }

			char *optname = optspec;
			char *optval = strchr(optspec, '=');
			if (optval) *optval++ = 0;

			if (!strcasecmp(optname, "slave_port") && optval) {
				dist->slave_port = strdup(optval);
			} else if (!strcasecmp(optname, "proxy_port") && optval) {
				dist->proxy_port = strdup(optval);
			} else if (!strcasecmp(optname, "max_slaves") && optval) {
				dist->max_slaves = atoi(optval);
			} else if (!strcasecmp(optname, "slaves_quit")) {
				dist->slaves_quit = !optval || atoi(optval);
			} else {
				fprintf(stderr, "distributed: Invalid engine argument %s or missing value\n", optname);
			}
		}
	}

	gtp_replies = calloc(dist->max_slaves, sizeof(char *));

	if (!dist->slave_port) {
		fprintf(stderr, "distributed: missing slave_port\n");
		exit(1);
	}
	int slave_sock = port_listen(dist->slave_port, dist->max_slaves);
	pthread_t thread;
	for (int id = 0; id < dist->max_slaves; id++) {
		pthread_create(&thread, NULL, slave_thread, (void *)(long)slave_sock);
	}

	if (dist->proxy_port) {
		int proxy_sock = port_listen(dist->proxy_port, dist->max_slaves);
		for (int id = 0; id < dist->max_slaves; id++) {
			pthread_create(&thread, NULL, proxy_thread, (void *)(long)proxy_sock);
		}
	}
	return dist;
}

struct engine *
engine_distributed_init(char *arg, struct board *b)
{
	start_time = time_now();
	struct distributed *dist = distributed_state_init(arg, b);
	struct engine *e = calloc(1, sizeof(struct engine));
	e->name = "Distributed Engine";
	e->comment = "I'm playing the distributed engine. When I'm losing, I will resign; "
		"if I think I win, I play until you pass. "
		"Anyone can send me 'winrate' in private chat to get my assessment of the position.";
	e->notify = distributed_notify;
	e->genmove = distributed_genmove;
	e->dead_group_list = distributed_dead_group_list;
	e->chat = distributed_chat;
	e->data = dist;
	// Keep the threads and the open socket connections:
	e->keep_on_clear = true;

	return e;
}