#ifndef ZZGO_DISTRIBUTED_DISTRIBUTED_H
#define ZZGO_DISTRIBUTED_DISTRIBUTED_H

#include <inttypes.h>	/* int64_t, INT64_MAX, PRIx64 */
#include <stdlib.h>	/* abort() */

#include "debug.h"	/* DEBUG_MODE */
#include "engine.h"	/* struct engine */
#include "stats.h"	/* struct move_stats */

/* A coord path encodes coordinates from root child to a given node:
 * A1->B2->C3 is encoded as coord(A1)<<18 + coord(B2)<<9 + coord(C3)
 * for 19x19. In this version the table is not a transposition table
 * so A1->B2->C3 and C3->B2->A1 are different.
 * The depth is limited to 7 for 19x19 (9 for 9x9) to fit in 64 bits.
 * path_t is signed to include pass and resign. */
typedef int64_t path_t;
#define PRIpath PRIx64
#define PATH_T_MAX INT64_MAX

#define hash_mask(bits) ((1<<(bits))-1)

/* parent_path() must never be used if path might be pass or resign. */
#define parent_path(path, board) ((path) >> board_bits2(board))
#define leaf_coord(path, board) ((path) & hash_mask(board_bits2(board)))
#define append_child(path, c, board) (((path) << board_bits2(board)) | (c))
#define max_parent_path(u, b) (((path_t)1) << (((u)->shared_levels - 1) * board_bits2(b)))
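
/* A minimal sketch of how these macros compose (disabled illustrative
 * code; the coord names and the board pointer b are hypothetical, and
 * board_bits2(b) is assumed to be 9 on 19x19 as described above): */
#if 0
path_t p = 0;
p = append_child(p, coord_a1, b);  /* coord(A1) */
p = append_child(p, coord_b2, b);  /* coord(A1)<<9 | coord(B2) */
p = append_child(p, coord_c3, b);  /* coord(A1)<<18 | coord(B2)<<9 | coord(C3) */
assert(leaf_coord(p, b) == coord_c3);			/* last coord on the path */
assert(leaf_coord(parent_path(p, b), b) == coord_b2);	/* parent drops the leaf */
#endif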

/* For debugging only */
struct hash_counts {
	long lookups;
	long collisions;
};

/* Find a hash table entry given its coord path from root.
 * Set found to false if the entry is empty.
 * Abort if the table gets too full (should never happen).
 * We use double hashing and coord_path = 0 for unused entries. */
#define find_hash(hash, table, hash_bits, path, found, counts) \
	do { \
		if (DEBUG_MODE) counts.lookups++; \
		int mask = hash_mask(hash_bits); \
		int delta = (int)((path) >> (hash_bits)) | 1; \
		hash = ((int)(path) ^ delta ^ (delta >> (hash_bits))) & mask; \
		path_t cp = (table)[hash].coord_path; \
		found = (cp == path); \
		if (found | !cp) break; \
		int tries = 1 << ((hash_bits)-2); \
		do { \
			if (DEBUG_MODE) counts.collisions++; \
			hash = (hash + delta) & mask; \
			cp = (table)[hash].coord_path; \
			found = (cp == path); \
			if (found | !cp) break; \
		} while (--tries); \
		if (!tries) abort(); /* table too full; should never happen */ \
	} while (0)

/* Stats exchanged between master and slave. They are always
 * incremental values to be added to what was last sent. */
struct incr_stats {
	path_t coord_path;
	struct move_stats incr;
};
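
/* A minimal sketch of how a receiver might fold one such increment into
 * its running totals (disabled illustrative code; merge_stats() is a
 * hypothetical helper, and move_stats is assumed to hold a playout count
 * plus a mean value): */
#if 0
static void
merge_stats(struct move_stats *total, const struct move_stats *incr)
{
	if (!incr->playouts) return;
	int n = total->playouts + incr->playouts;
	/* Weighted average of the two means, then the combined count. */
	total->value = (total->value * total->playouts
			+ incr->value * incr->playouts) / n;
	total->playouts = n;
}
#endif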

/* A slave machine updates at most 7 (19x19) or 9 (9x9) nodes for each
 * update of the root node. If we have at most 20 threads at 1500
 * games/s each, a slave machine can do at most 30K games/s. */

/* At 30K games/s a slave can output 270K nodes/s (9 nodes per game),
 * or 4.2 MB/s (each node costs roughly 16 bytes on the wire: a path_t
 * plus a move_stats increment). The master with a 100 MB/s network can
 * thus support at most 24 slaves. */
#define DEFAULT_MAX_SLAVES 24

/* In a 30s move at 270K nodes/s a slave can send and receive at most
 * 8.1M nodes so at worst 23 bits are needed for the hash table in the
 * slave and for the per-slave hash table in the master. However the
 * same nodes are often sent so in practice 21 bits are sufficient.
 * Larger hash tables are not desirable because it would take too much
 * time to clear them at each move in the master. */
#define DEFAULT_STATS_HBITS 21
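
/* A minimal sketch of a slave-side lookup with find_hash() (disabled
 * illustrative code; stats_htable, stats_counts and lookup_stats are
 * hypothetical names, and the definitions would live in a .c file): */
#if 0
static struct incr_stats stats_htable[1 << DEFAULT_STATS_HBITS];
static struct hash_counts stats_counts;

static struct incr_stats *
lookup_stats(path_t path)
{
	int hash;
	bool found;
	find_hash(hash, stats_htable, DEFAULT_STATS_HBITS, path, found, stats_counts);
	return found ? &stats_htable[hash] : NULL;
}
#endif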

/* If we select a cycle of at most 40ms, a slave machine can update at
 * most 10K different nodes per cycle. In practice the updates are
 * biased so we update much fewer nodes. A shorter cycle is preferable
 * because the stats are fresher. The cycle time does not affect
 * the number of slaves and the hash table size. */
#define DEFAULT_SHARED_NODES 1024

/* Maximum game length. Power of 10 just to ease debugging. */
#define DIST_GAMELEN 1000

#define force_reply(id) ((id) + DIST_GAMELEN)
#define prevent_reply(id) ((id) % DIST_GAMELEN)
#define move_number(id) ((id) % DIST_GAMELEN)
#define reply_disabled(id) ((id) < DIST_GAMELEN)
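
/* Worked example: for move id 37, force_reply(37) == 1037;
 * move_number(1037) == 37, and prevent_reply(1037) == 37 strips the flag;
 * reply_disabled(37) is true while reply_disabled(1037) is false. */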

char *path2sstr(path_t path, struct board *b);
struct engine *engine_distributed_init(char *arg, struct board *b);

#endif