1 /* This is a master for the "distributed" engine. It receives connections
2 * from slave machines, sends them gtp commands, then aggregates the
3 * results. It can also act as a proxy for the logs of all slave machines.
4 * The slave machines must run with engine "uct" (not "distributed").
5 * The master sends the pachi-genmoves gtp command to each slave,
6 * gets as replies a list of candidate moves, their number of playouts
7 * and their value. The master then picks the most popular move. */
9 /* With time control, the master waits for all slaves, except
10 * when the allowed time is already passed. In this case the
11 * master picks among the available replies, or waits for just
12 * one reply if there is none yet.
13 * Without time control, the master waits until the desired
14 * number of games have been simulated. In this case the -t
15 * parameter for the master should be the sum of the parameters
 * for all slaves. */
18 /* This first version does not send tree updates between slaves,
19 * but it has fault tolerance. If a slave is out of sync, the master
20 * sends it the appropriate command history. */
22 /* Pass me arguments like a=b,c=d,...
23 * Supported arguments:
24 * slave_port=SLAVE_PORT slaves connect to this port; this parameter is mandatory.
25 * max_slaves=MAX_SLAVES default 100
26 * slaves_quit=0|1 quit gtp command also sent to slaves, default false.
27 * proxy_port=PROXY_PORT slaves optionally send their logs to this port.
28 * Warning: with proxy_port, the master stderr mixes the logs of all
29 * machines but you can separate them again:
30 * slave logs: sed -n '/< .*:/s/.*< /< /p' logfile
31 * master logs: perl -0777 -pe 's/<[ <].*:.*\n//g' logfile
 */
34 /* A configuration without proxy would have one master run on masterhost as:
35 * zzgo -e distributed slave_port=1234
36 * and N slaves running as:
37 * zzgo -e uct -g masterhost:1234 slave
39 * zzgo -e distributed slave_port=1234,proxy_port=1235
40 * zzgo -e uct -g masterhost:1234 -l masterhost:1235 slave
41 * If the master itself runs on a machine other than that running gogui,
42 * gogui-twogtp, kgsGtp or cgosGtp, it can redirect its gtp port:
43 * zzgo -e distributed -g 10000 slave_port=1234,proxy_port=1235
 */
55 #include <sys/types.h>
56 #include <sys/socket.h>
57 #include <arpa/inet.h>
71 #include "distributed/distributed.h"
73 /* Internal engine state. */
79 struct move my_last_move
;
80 struct move_stats my_last_stats
;
83 static coord_t
select_best_move(struct board
*b
, struct move_stats
*best_stats
,
84 int *total_playouts
, int *total_threads
);
86 /* Default number of simulations to perform per move.
87 * Note that this is in total over all slaves! */
88 #define DIST_GAMES 80000
89 static const struct time_info default_ti
= {
92 .len
= { .games
= DIST_GAMES
},
95 #define get_value(value, color) \
96 ((color) == S_BLACK ? (value) : 1 - (value))
98 /* Max size for one reply or slave log. */
101 /* Max size of all gtp commands for one game */
102 #define CMDS_SIZE (40*MAX_GAMELEN)
104 /* All gtp commands for current game separated by \n */
105 char gtp_cmds
[CMDS_SIZE
];
107 /* Latest gtp command sent to slaves. */
108 char *gtp_cmd
= NULL
;
110 /* Remember at most 3 gtp ids per move (time_left, genmoves, play).
111 * For move 0 there can be more than 3 commands
112 * but then we resend the whole history. */
113 #define MAX_CMDS_PER_MOVE 3
115 /* History of gtp commands sent for current game, indexed by move. */
116 int id_history
[MAX_GAMELEN
][MAX_CMDS_PER_MOVE
];
117 char *cmd_history
[MAX_GAMELEN
][MAX_CMDS_PER_MOVE
];
119 /* Number of active slave machines working for this master. */
120 int active_slaves
= 0;
122 /* Number of replies to last gtp command already received. */
125 /* All replies to latest gtp command are in gtp_replies[0..reply_count-1]. */
128 /* Mutex protecting gtp_cmds, gtp_cmd, id_history, cmd_history,
129 * active_slaves, reply_count & gtp_replies */
130 pthread_mutex_t slave_lock
= PTHREAD_MUTEX_INITIALIZER
;
132 /* Condition signaled when a new gtp command is available. */
133 static pthread_cond_t cmd_cond
= PTHREAD_COND_INITIALIZER
;
135 /* Condition signaled when reply_count increases. */
136 static pthread_cond_t reply_cond
= PTHREAD_COND_INITIALIZER
;
138 /* Mutex protecting stderr. Must not be held at same time as slave_lock. */
139 pthread_mutex_t log_lock
= PTHREAD_MUTEX_INITIALIZER
;
141 /* Absolute time when this program was started.
142 * For debugging only. */
145 /* Write the time, client address, prefix, and string s to stderr atomically.
146 * s should end with a \n */
148 logline(struct in_addr
*client
, char *prefix
, char *s
)
150 double now
= time_now();
151 char addr
[INET_ADDRSTRLEN
];
153 inet_ntop(AF_INET
, client
, addr
, sizeof(addr
));
157 pthread_mutex_lock(&log_lock
);
158 fprintf(stderr
, "%s%15s %9.3f: %s", prefix
, addr
, now
- start_time
, s
);
159 pthread_mutex_unlock(&log_lock
);
162 /* Thread opening a connection on the given socket and copying input
163 * from there to stderr. */
165 proxy_thread(void *arg
)
167 int proxy_sock
= (long)arg
;
168 assert(proxy_sock
>= 0);
170 struct in_addr client
;
171 int conn
= open_server_connection(proxy_sock
, &client
);
172 FILE *f
= fdopen(conn
, "r");
174 while (fgets(buf
, BSIZE
, f
)) {
175 logline(&client
, "< ", buf
);
181 /* Main loop of a slave thread.
182 * Send the current command to the slave machine and wait for a reply.
183 * Resend command history if the slave machine is out of sync.
184 * Returns when the connection with the slave machine is cut.
185 * slave_lock is held on both entry and exit of this function. */
187 slave_loop(FILE *f
, struct in_addr client
, char *buf
, bool resend
)
189 char *to_send
= gtp_cmd
;
193 while (cmd_id
== reply_id
&& !resend
) {
194 // Wait for a new gtp command.
195 pthread_cond_wait(&cmd_cond
, &slave_lock
);
197 cmd_id
= atoi(gtp_cmd
);
201 /* Command available, send it to slave machine.
202 * If slave was out of sync, send the history. */
203 assert(to_send
&& gtp_cmd
);
204 strncpy(buf
, to_send
, CMDS_SIZE
);
205 cmd_id
= atoi(gtp_cmd
);
207 pthread_mutex_unlock(&slave_lock
);
209 if (DEBUGL(1) && resend
) {
210 if (to_send
== gtp_cmds
) {
211 logline(&client
, "? ", "Slave out-of-sync, resending all history\n");
213 logline(&client
, "? ", "Slave behind, partial resend\n");
217 logline(&client
, ">>", buf
);
221 /* Read the reply, which always ends with \n\n
222 * The slave machine sends "=id reply" or "?id reply"
223 * with id == cmd_id if it is in sync. */
227 while (fgets(line
, buf
+ CMDS_SIZE
- line
, f
) && *line
!= '\n') {
229 logline(&client
, "<<", line
);
230 if (reply_id
< 0 && (*line
== '=' || *line
== '?') && isdigit(line
[1]))
231 reply_id
= atoi(line
+1);
232 line
+= strlen(line
);
235 pthread_mutex_lock(&slave_lock
);
236 if (*line
!= '\n') return;
237 // Make sure we are still in sync:
238 cmd_id
= atoi(gtp_cmd
);
239 if (reply_id
== cmd_id
&& *buf
== '=') {
241 gtp_replies
[reply_count
++] = buf
;
242 pthread_cond_signal(&reply_cond
);
247 /* Resend everything if slave got latest command,
248 * but doesn't have a correct board. */
249 if (reply_id
== cmd_id
) continue;
251 /* The slave is ouf-of-sync. Check whether the last command
252 * it received belongs to the current game. If so resend
253 * starting at the last move known by slave, otherwise
254 * resend the whole history. */
255 int reply_move
= move_number(reply_id
);
256 if (reply_move
> move_number(cmd_id
)) continue;
258 for (int slot
= 0; slot
< MAX_CMDS_PER_MOVE
; slot
++) {
259 if (reply_id
== id_history
[reply_move
][slot
]) {
260 to_send
= cmd_history
[reply_move
][slot
];
267 /* Thread sending gtp commands to one slave machine, and
268 * reading replies. If a slave machine dies, this thread waits
269 * for a connection from another slave. */
271 slave_thread(void *arg
)
273 int slave_sock
= (long)arg
;
274 assert(slave_sock
>= 0);
275 char slave_buf
[CMDS_SIZE
];
279 /* Wait for a connection from any slave. */
280 struct in_addr client
;
281 int conn
= open_server_connection(slave_sock
, &client
);
283 FILE *f
= fdopen(conn
, "r+");
285 logline(&client
, "= ", "new slave\n");
287 /* Minimal check of the slave identity. */
289 if (!fgets(slave_buf
, sizeof(slave_buf
), f
)
290 || strncasecmp(slave_buf
, "= Pachi", 7)
291 || !fgets(slave_buf
, sizeof(slave_buf
), f
)
292 || strcmp(slave_buf
, "\n")) {
293 logline(&client
, "? ", "bad slave\n");
298 pthread_mutex_lock(&slave_lock
);
300 slave_loop(f
, client
, slave_buf
, resend
);
302 assert(active_slaves
> 0);
304 pthread_mutex_unlock(&slave_lock
);
308 logline(&client
, "= ", "lost slave\n");
313 /* Create a new gtp command for all slaves. The slave lock is held
314 * upon entry and upon return, so the command will actually be
315 * sent when the lock is released. The last command is overwritten
316 * if gtp_cmd points to a non-empty string. cmd is a single word;
317 * args has all arguments and is empty or has a trailing \n */
319 update_cmd(struct board
*b
, char *cmd
, char *args
)
322 /* To make sure the slaves are in sync, we ignore the original id
323 * and use the board number plus some random bits as gtp id.
324 * Make sure the new command has a new id otherwise slaves
326 static int gtp_id
= -1;
328 int moves
= is_reset(cmd
) ? 0 : b
->moves
;
330 /* fast_random() is 16-bit only so the multiplication can't overflow. */
331 id
= force_reply(moves
+ fast_random(65535) * DIST_GAMELEN
);
332 } while (id
== gtp_id
);
334 snprintf(gtp_cmd
, gtp_cmds
+ CMDS_SIZE
- gtp_cmd
, "%d %s %s",
335 id
, cmd
, *args
? args
: "\n");
338 /* Remember history for out-of-sync slaves, at most 3 ids per move
339 * (time_left, genmoves, play). */
341 slot
= (slot
+ 1) % MAX_CMDS_PER_MOVE
;
342 id_history
[moves
][slot
] = id
;
343 cmd_history
[moves
][slot
] = gtp_cmd
;
345 // Notify the slave threads about the new command.
346 pthread_cond_broadcast(&cmd_cond
);
349 /* Update the command history, then create a new gtp command
350 * for all slaves. The slave lock is held upon entry and
351 * upon return, so the command will actually be sent when the
352 * lock is released. cmd is a single word; args has all
353 * arguments and is empty or has a trailing \n */
355 new_cmd(struct board
*b
, char *cmd
, char *args
)
357 // Clear the history when a new game starts:
358 if (!gtp_cmd
|| is_gamestart(cmd
)) {
361 /* Preserve command history for new slaves.
362 * To indicate that the slave should only reply to
363 * the last command we force the id of previous
364 * commands to be just the move number. */
365 int id
= prevent_reply(atoi(gtp_cmd
));
366 int len
= strspn(gtp_cmd
, "0123456789");
368 snprintf(buf
, sizeof(buf
), "%0*d", len
, id
);
369 memcpy(gtp_cmd
, buf
, len
);
371 gtp_cmd
+= strlen(gtp_cmd
);
374 // Let the slave threads send the new gtp command:
375 update_cmd(b
, cmd
, args
);
378 /* If time_limit > 0, wait until all slaves have replied, or if the
379 * given absolute time is passed, wait for at least one reply.
380 * If time_limit == 0, wait until we get at least min_playouts games
381 * simulated in total by all the slaves, or until all slaves have replied.
382 * The replies are returned in gtp_replies[0..reply_count-1]
383 * slave_lock is held on entry and on return. */
385 get_replies(double time_limit
, int min_playouts
, struct board
*b
)
388 while (reply_count
== 0 || reply_count
< active_slaves
) {
389 if (time_limit
&& reply_count
> 0) {
392 ts
.tv_nsec
= (int)(modf(time_limit
, &sec
)*1000000000.0);
393 ts
.tv_sec
= (int)sec
;
394 pthread_cond_timedwait(&reply_cond
, &slave_lock
, &ts
);
396 pthread_cond_wait(&reply_cond
, &slave_lock
);
398 if (reply_count
== 0) continue;
399 if (reply_count
>= active_slaves
) return;
402 if (now
>= time_limit
) break;
404 int playouts
, threads
;
406 select_best_move(b
, &s
, &playouts
, &threads
);
407 if (playouts
>= min_playouts
) return;
412 snprintf(buf
, sizeof(buf
),
413 "get_replies timeout %.3f >= %.3f, replies %d < active %d\n",
414 now
- start_time
, time_limit
- start_time
, reply_count
, active_slaves
);
415 logline(NULL
, "? ", buf
);
417 assert(reply_count
> 0);
420 /* Maximum time (seconds) to wait for answers to fast gtp commands
421 * (all commands except pachi-genmoves and final_status_list). */
422 #define MAX_FAST_CMD_WAIT 1.0
424 /* Dispatch a new gtp command to all slaves.
425 * The slave lock must not be held upon entry and is released upon return.
426 * args is empty or ends with '\n' */
427 static enum parse_code
428 distributed_notify(struct engine
*e
, struct board
*b
, int id
, char *cmd
, char *args
, char **reply
)
430 struct distributed
*dist
= e
->data
;
432 /* Commands that should not be sent to slaves */
433 if ((!strcasecmp(cmd
, "quit") && !dist
->slaves_quit
)
434 || !strcasecmp(cmd
, "uct_genbook")
435 || !strcasecmp(cmd
, "uct_dumpbook")
436 || !strcasecmp(cmd
, "kgs-chat")
438 /* and commands that will be sent to slaves later */
439 || !strcasecmp(cmd
, "genmove")
440 || !strcasecmp(cmd
, "kgs-genmove_cleanup")
441 || !strcasecmp(cmd
, "final_score")
442 || !strcasecmp(cmd
, "final_status_list"))
445 pthread_mutex_lock(&slave_lock
);
447 // Create a new command to be sent by the slave threads.
448 new_cmd(b
, cmd
, args
);
450 /* Wait for replies here. If we don't wait, we run the
451 * risk of getting out of sync with most slaves and
452 * sending command history too frequently. */
453 get_replies(time_now() + MAX_FAST_CMD_WAIT
, 0, b
);
455 pthread_mutex_unlock(&slave_lock
);
459 /* pachi-genmoves returns a line "=id total_playouts threads[ reserved]" then a list of lines
460 * "coord playouts value". Keep this function in sync with uct_notify().
461 * Return the move with most playouts, its average value, and stats for debugging.
462 * slave_lock is held on entry and on return. */
464 select_best_move(struct board
*b
, struct move_stats
*best_stats
,
465 int *total_playouts
, int *total_threads
)
467 assert(reply_count
> 0);
469 /* +2 for pass and resign. */
470 struct move_stats
*stats
= alloca((board_size2(b
)+2) * sizeof(struct move_stats
));
471 memset(stats
, 0, (board_size2(b
)+2) * sizeof(*stats
));
474 coord_t best_move
= pass
;
475 int best_playouts
= -1;
476 *total_playouts
= *total_threads
= 0;
478 for (int reply
= 0; reply
< reply_count
; reply
++) {
479 char *r
= gtp_replies
[reply
];
480 int id
, playouts
, threads
;
481 if (sscanf(r
, "=%d %d %d", &id
, &playouts
, &threads
) != 3) continue;
482 *total_playouts
+= playouts
;
483 *total_threads
+= threads
;
484 // Skip the rest of the firt line if any (allow future extensions)
489 while (r
&& sscanf(++r
, "%63s %d %f", move
, &s
.playouts
, &s
.value
) == 3) {
490 coord_t
*c
= str2coord(move
, board_size(b
));
491 stats_add_result(&stats
[*c
], s
.value
, s
.playouts
);
492 if (stats
[*c
].playouts
> best_playouts
) {
493 best_playouts
= stats
[*c
].playouts
;
500 *best_stats
= stats
[best_move
];
504 /* Time control is mostly done by the slaves, so we use default values here. */
505 #define FUSEKI_END 20
506 #define YOSE_START 40
509 distributed_genmove(struct engine
*e
, struct board
*b
, struct time_info
*ti
, enum stone color
, bool pass_all_alive
)
511 struct distributed
*dist
= e
->data
;
512 double start
= time_now();
515 int min_playouts
= 0;
517 char *cmd
= pass_all_alive
? "pachi-genmoves_cleanup" : "pachi-genmoves";
520 if (ti
->period
== TT_NULL
) *ti
= default_ti
;
521 struct time_stop stop
;
522 time_stop_conditions(ti
, b
, FUSEKI_END
, YOSE_START
, &stop
);
524 if (ti
->dim
== TD_WALLTIME
) {
525 time_limit
= ti
->len
.t
.timer_start
+ stop
.worst
.time
;
527 /* Send time info to the slaves to make sure they all
528 * reply in time, particularly if they were out of sync
529 * and there are no time_left commands. We cannot send
530 * the absolute time limit because slaves may have a
531 * different system time.
532 * Keep this code in sync with gtp_parse(). */
533 snprintf(args
, sizeof(args
), "%s %.3f %.3f %d %d\n",
534 stone2str(color
), ti
->len
.t
.main_time
,
535 ti
->len
.t
.byoyomi_time
, ti
->len
.t
.byoyomi_periods
,
536 ti
->len
.t
.byoyomi_stones
);
538 min_playouts
= stop
.desired
.playouts
;
540 /* For absolute number of simulations, slaves still
541 * use their own -t =NUM parameter. (The master
542 * needs to know the total number of simulations over
543 * all slaves so it has a different -t parameter.) */
544 snprintf(args
, sizeof(args
), "%s\n", stone2str(color
));
547 pthread_mutex_lock(&slave_lock
);
548 new_cmd(b
, cmd
, args
);
550 get_replies(time_limit
, min_playouts
, b
);
551 int replies
= reply_count
;
553 int playouts
, threads
;
554 dist
->my_last_move
.color
= color
;
555 dist
->my_last_move
.coord
= select_best_move(b
, &dist
->my_last_stats
, &playouts
, &threads
);
557 /* Tell the slaves to commit to the selected move, overwriting
558 * the last "pachi-genmoves" in the command history. */
559 char *coord
= coord2str(dist
->my_last_move
.coord
, b
);
560 snprintf(args
, sizeof(args
), "%s %s\n", stone2str(color
), coord
);
561 update_cmd(b
, "play", args
);
562 pthread_mutex_unlock(&slave_lock
);
566 enum stone color
= dist
->my_last_move
.color
;
567 double time
= time_now() - start
+ 0.000001; /* avoid divide by zero */
568 snprintf(buf
, sizeof(buf
),
569 "GLOBAL WINNER is %s %s with score %1.4f (%d/%d games)\n"
570 "genmove in %0.2fs %d slaves %d threads (%d games/s,"
571 " %d games/s/slave, %d games/s/thread)\n",
572 stone2str(color
), coord
, get_value(dist
->my_last_stats
.value
, color
),
573 dist
->my_last_stats
.playouts
, playouts
, time
, replies
, threads
,
574 (int)(playouts
/time
), (int)(playouts
/time
/replies
),
575 (int)(playouts
/time
/threads
));
576 logline(NULL
, "* ", buf
);
579 return coord_copy(dist
->my_last_move
.coord
);
583 distributed_chat(struct engine
*e
, struct board
*b
, char *cmd
)
585 struct distributed
*dist
= e
->data
;
586 static char reply
[BSIZE
];
588 cmd
+= strspn(cmd
, " \n\t");
589 if (!strncasecmp(cmd
, "winrate", 7)) {
590 enum stone color
= dist
->my_last_move
.color
;
591 snprintf(reply
, BSIZE
, "In %d playouts at %d machines, %s %s can win with %.2f%% probability.",
592 dist
->my_last_stats
.playouts
, active_slaves
, stone2str(color
),
593 coord2sstr(dist
->my_last_move
.coord
, b
),
594 100 * get_value(dist
->my_last_stats
.value
, color
));
601 scmp(const void *p1
, const void *p2
)
603 return strcasecmp(*(char * const *)p1
, *(char * const *)p2
);
607 distributed_dead_group_list(struct engine
*e
, struct board
*b
, struct move_queue
*mq
)
609 pthread_mutex_lock(&slave_lock
);
611 new_cmd(b
, "final_status_list", "dead\n");
612 get_replies(time_now() + MAX_FAST_CMD_WAIT
, 0, b
);
614 /* Find the most popular reply. */
615 qsort(gtp_replies
, reply_count
, sizeof(char *), scmp
);
619 for (int reply
= 1; reply
< reply_count
; reply
++) {
620 if (!strcmp(gtp_replies
[reply
], gtp_replies
[reply
-1])) {
625 if (count
> best_count
) {
631 /* Pick the first move of each line as group. */
632 char *dead
= gtp_replies
[best_reply
];
633 dead
= strchr(dead
, ' '); // skip "id "
634 while (dead
&& *++dead
!= '\n') {
635 coord_t
*c
= str2coord(dead
, board_size(b
));
638 dead
= strchr(dead
, '\n');
640 pthread_mutex_unlock(&slave_lock
);
643 static struct distributed
*
644 distributed_state_init(char *arg
, struct board
*b
)
646 struct distributed
*dist
= calloc(1, sizeof(struct distributed
));
648 dist
->max_slaves
= 100;
650 char *optspec
, *next
= arg
;
653 next
+= strcspn(next
, ",");
654 if (*next
) { *next
++ = 0; } else { *next
= 0; }
656 char *optname
= optspec
;
657 char *optval
= strchr(optspec
, '=');
658 if (optval
) *optval
++ = 0;
660 if (!strcasecmp(optname
, "slave_port") && optval
) {
661 dist
->slave_port
= strdup(optval
);
662 } else if (!strcasecmp(optname
, "proxy_port") && optval
) {
663 dist
->proxy_port
= strdup(optval
);
664 } else if (!strcasecmp(optname
, "max_slaves") && optval
) {
665 dist
->max_slaves
= atoi(optval
);
666 } else if (!strcasecmp(optname
, "slaves_quit")) {
667 dist
->slaves_quit
= !optval
|| atoi(optval
);
669 fprintf(stderr
, "distributed: Invalid engine argument %s or missing value\n", optname
);
674 gtp_replies
= calloc(dist
->max_slaves
, sizeof(char *));
676 if (!dist
->slave_port
) {
677 fprintf(stderr
, "distributed: missing slave_port\n");
680 int slave_sock
= port_listen(dist
->slave_port
, dist
->max_slaves
);
682 for (int id
= 0; id
< dist
->max_slaves
; id
++) {
683 pthread_create(&thread
, NULL
, slave_thread
, (void *)(long)slave_sock
);
686 if (dist
->proxy_port
) {
687 int proxy_sock
= port_listen(dist
->proxy_port
, dist
->max_slaves
);
688 for (int id
= 0; id
< dist
->max_slaves
; id
++) {
689 pthread_create(&thread
, NULL
, proxy_thread
, (void *)(long)proxy_sock
);
696 engine_distributed_init(char *arg
, struct board
*b
)
698 start_time
= time_now();
699 struct distributed
*dist
= distributed_state_init(arg
, b
);
700 struct engine
*e
= calloc(1, sizeof(struct engine
));
701 e
->name
= "Distributed Engine";
702 e
->comment
= "I'm playing the distributed engine. When I'm losing, I will resign, "
703 "if I think I win, I play until you pass. "
704 "Anyone can send me 'winrate' in private chat to get my assessment of the position.";
705 e
->notify
= distributed_notify
;
706 e
->genmove
= distributed_genmove
;
707 e
->dead_group_list
= distributed_dead_group_list
;
708 e
->chat
= distributed_chat
;
710 // Keep the threads and the open socket connections:
711 e
->keep_on_clear
= true;