/* The master keeps stats received from slaves in a queue of received
 * buffers that are merged together with the functions implemented
 * here. It also has one hash table per slave to maintain cumulative
 * stats that have not yet been sent to the slave machine. The queue
 * and the hash tables are cleared at each new move. */
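
/* Data flow: each slave thread takes buffers from the shared
 * receive_queue, skips its own, N-way merges the rest into its
 * private stats_htable, then copies the entries with the largest
 * playout increments into an output buffer for its slave. */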
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "distributed/distributed.h"
#include "distributed/merge.h"
/* We merge together debug stats for all hash tables. */
static struct hash_counts h_counts;
/* Display and reset hash statistics. For debugging only. */
void
merge_print_stats(int total_hnodes)
{
	char buf[BSIZE];
	snprintf(buf, sizeof(buf),
		 "stats occupied %ld %.1f%% inserts %ld collisions %ld/%ld %.1f%%\n",
		 h_counts.occupied, h_counts.occupied * 100.0 / total_hnodes,
		 h_counts.inserts, h_counts.collisions, h_counts.lookups,
		 h_counts.collisions * 100.0 / (h_counts.lookups + 1));
	logline(NULL, "* ", buf);

	if (DEBUG_MODE) h_counts.occupied = 0;
}
/* We maintain counts per bucket to avoid sorting large arrays.
 * All nodes with n updates since the last send go to bucket n.
 * We have at most max_merged_nodes = (max_slaves-1) * shared_nodes
 * nodes to merge, 230K nodes for 24 slaves. If we put all nodes above
 * 1K updates in the top bucket, we get at most 230K/1K = 230 nodes
 * in this bucket. So we can select exactly the best shared_nodes
 * nodes if shared_nodes >= 230. In practice there is overlap between
 * nodes sent by different slaves so shared_nodes can be lower. */
#define MAX_BUCKETS 1024
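
/* For example, a node whose cumulative increment reaches 3 playouts
 * is counted in bucket_count[3]; a node with 5000 playouts lands in
 * the top bucket, bucket_count[MAX_BUCKETS-1]. output_stats() later
 * scans the buckets from the top down to pick the best nodes. */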
/* Update the hash table for the given increment stats,
 * and increment the bucket count. Return the hash index.
 * The slave lock is not held on either entry or exit of this function. */
static int
stats_tally(struct incr_stats *s, struct slave_state *sstate, int *bucket_count)
{
	int h;
	bool found;
	struct incr_stats *stats_htable = sstate->stats_htable;
	find_hash(h, stats_htable, sstate->stats_hbits, s->coord_path, found, h_counts);

	if (found) {
		assert(stats_htable[h].incr.playouts > 0);
		stats_add_result(&stats_htable[h].incr, s->incr.value, s->incr.playouts);
	} else {
		stats_htable[h] = *s;
		if (DEBUG_MODE) h_counts.inserts++, h_counts.occupied++;
	}

	int incr = stats_htable[h].incr.playouts;
	if (incr >= MAX_BUCKETS) incr = MAX_BUCKETS - 1;
	bucket_count[incr]++;
	return h;
}
static struct incr_stats terminator = { .coord_path = INT64_MAX };
/* Initialize the next pointers (see merge_new_stats()).
 * Exclude invalid buffers and my own buffers by setting their next pointer
 * to a terminator value. Update min if there are too many nodes to merge,
 * so that merge time remains reasonable and the merge buffer doesn't overflow.
 * (We skip the oldest buffers if the slave thread is too far behind. It is
 * more important to get frequent incomplete updates than late complete updates.)
 * Return the total number of nodes to be merged.
 * The slave lock is not held on either entry or exit of this function. */
static int
filter_buffers(struct slave_state *sstate, struct incr_stats **next,
	       int *min, int max)
{
	int size = 0;
	int max_size = sstate->max_merged_nodes * sizeof(struct incr_stats);

	for (int q = max; q >= *min; q--) {
		if (!receive_queue[q] || receive_queue[q]->owner == sstate->thread_id) {
			next[q] = &terminator;
		} else if (size + receive_queue[q]->size > max_size) {
			/* Too much data; skip this buffer and all older ones. */
			*min = q + 1;
			break;
		} else {
			next[q] = (struct incr_stats *)receive_queue[q]->buf;
			size += receive_queue[q]->size;
		}
	}
	return size / sizeof(struct incr_stats);
}
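
/* For instance, if 300K nodes are queued but max_merged_nodes is worth
 * only 230K, the loop above walks back from the newest buffer and stops
 * once adding one more buffer would exceed the limit, so the oldest
 * queue entries are simply dropped from this merge pass. */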
/* Return the minimum coord path of next[min..max].
 * This implementation is optimized for small values of max - min,
 * which is the case if slaves are not too far behind.
 * A heap (priority queue) could be used otherwise.
 * The returned value might come from a buffer that has been
 * invalidated; the caller must check for this. In that case
 * the returned value is < the correct value. */
static path_t
min_coord(struct incr_stats **next, int min, int max)
{
	path_t min_c = next[min]->coord_path;
	for (int q = min + 1; q <= max; q++) {
		if (next[q]->coord_path < min_c)
			min_c = next[q]->coord_path;
	}
	return min_c;
}
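
/* The linear scan above costs O(max - min) per extracted coord path,
 * so a full merge of n total nodes over k buffers is O(n * k); a heap
 * would reduce the per-path cost to O(log k), which only pays off
 * when many buffers are pending. */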
/* Merge all valid incremental stats in receive_queue[min..max],
 * update the hash table, set the bucket counts, and save the
 * list of updated hash table entries. The input buffers and
 * the output buffer are all sorted by increasing coord path.
 * The input buffers end with a terminator value INT64_MAX.
 * Return the number of updated hash table entries. */

/* The slave lock is not held on either entry or exit of this function,
 * so receive_queue entries may be invalidated while we scan them.
 * The receive queue might grow while we scan it but we ignore
 * entries above max; they will be processed at the next call.
 * This function does not modify the receive queue. */
static int
merge_new_stats(struct slave_state *sstate, int min, int max,
		int *bucket_count, int *nodes_read, int last_queue_age)
{
	*nodes_read = 0;
	if (max < min) return 0;

	/* next[q] is the next value to be checked in receive_queue[q]->buf */
	struct incr_stats *next_[max - min + 1];
	struct incr_stats **next = next_ - min;
	*nodes_read = filter_buffers(sstate, next, &min, max);

	/* prev_min_c is only used for debugging. */
	path_t prev_min_c = 0;

	/* Do N-way merge, processing one coord path per iteration.
	 * If the minimum coord is INT64_MAX, either all buffers are
	 * invalidated, or at least one is valid and we are at the
	 * end of all valid buffers. In both cases we're done. */
	int merge_count = 0;
	path_t min_c;
	while ((min_c = min_coord(next, min, max)) != INT64_MAX) {

		struct incr_stats sum = { .coord_path = min_c,
					  .incr = { .playouts = 0, .value = 0.0 }};
		for (int q = min; q <= max; q++) {
			struct incr_stats s = *(next[q]);

			/* If s.coord_path != min_c, we must skip s.coord_path for now.
			 * If min_c is invalid, a future iteration will read a stable
			 * value since the call of min_coord(), so at some point we will
			 * get s.coord_path == min_c and we will not loop forever. */
			if (s.coord_path != min_c) continue;

			/* We check the buffer validity after s.coord_path has been
			 * checked, to avoid a race condition and also to avoid
			 * multiple useless checks for the same coord_path. */
			if (unlikely(!receive_queue[q])) {
				next[q] = &terminator;
				continue;
			}

			/* Stop if we have a new move. If queue_age is incremented
			 * after this check, the merged output will be discarded. */
			if (unlikely(queue_age > last_queue_age)) return 0;

			/* s.coord_path is valid here, so min_c is valid too.
			 * (An invalid min_c would be < s.coord_path.) */
			assert(min_c > prev_min_c);

			assert(s.coord_path && s.incr.playouts);
			stats_add_result(&sum.incr, s.incr.value, s.incr.playouts);
			next[q]++;
		}

		/* All the buffers containing min_c may have been invalidated
		 * so sum may still be zero. But in this case the next[q] which
		 * contained min_c have been reset to &terminator so we will
		 * not loop forever. */
		if (!sum.incr.playouts) continue;

		assert(min_c > prev_min_c);
		if (DEBUG_MODE) prev_min_c = min_c;

		/* At this point sum contains only valid increments,
		 * so we can add it to the hash table. */
		assert(merge_count < sstate->max_merged_nodes);
		sstate->merged[merge_count++] = stats_tally(&sum, sstate, bucket_count);
	}
	return merge_count;
}
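
/* Illustration: merging two buffers
 *   A: (path 3, 10 playouts) (path 7, 2 playouts) terminator
 *   B: (path 3,  5 playouts) (path 9, 4 playouts) terminator
 * processes coord paths 3, 7 and 9 in increasing order, tallying 15,
 * 2 and 4 playout increments into the hash table and incrementing the
 * corresponding bucket counts (assuming the table started empty). */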
/* Save in buf the best increments from other slaves merged previously.
 * To avoid a costly scan of the entire hash table we only send nodes
 * that were previously sent recently by other slaves. It is possible
 * but very unlikely that the hash table contains some nodes with a
 * higher number of playouts.
 * Return the number of nodes to be sent.
 * The slave lock is not held on either entry or exit of this function. */
static int
output_stats(struct incr_stats *buf, struct slave_state *sstate,
	     int *bucket_count, int merge_count)
{
	/* Find the minimum increment to send. The bucket with minimum
	 * increment may be sent only partially. */
	int out_count = 0;
	int min_incr = MAX_BUCKETS;
	int shared_nodes = sstate->max_buf_size / sizeof(*buf);
	do {
		out_count += bucket_count[--min_incr];
	} while (min_incr > 1 && out_count < shared_nodes);

	/* Send all increments > min_incr plus whatever we can at min_incr. */
	int min_count = bucket_count[min_incr] - (out_count - shared_nodes);
	out_count = 0;
	int *merged = sstate->merged;
	struct incr_stats *stats_htable = sstate->stats_htable;
	while (merge_count--) {
		int h = *merged++;
		int delta = stats_htable[h].incr.playouts - min_incr;
		if (delta < 0 || (delta == 0 && --min_count < 0)) continue;

		assert(out_count < shared_nodes);
		buf[out_count++] = stats_htable[h];

		/* Clear the hash table entry. (We could instead
		 * just clear the playouts but clearing the entry
		 * leads to fewer collisions later.) */
		stats_htable[h].coord_path = 0;
		if (DEBUG_MODE) h_counts.occupied--;
	}

	/* The slave expects increments sorted by coord path
	 * but they are sorted already. */
	return out_count;
}
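
/* Worked example: with shared_nodes = 4 and bucket_count[1023] = 1,
 * bucket_count[700] = 2, bucket_count[300] = 5 (all others zero), the
 * do-while stops at min_incr = 300 with out_count = 8. Then min_count
 * = 5 - (8 - 4) = 1, so every node above 300 increments is sent plus
 * one node at exactly 300. */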
/* Get all incremental stats received from other slaves since the
 * last send. Store in buf the stats with the largest playout increments.
 * Return the byte size of the resulting buffer. The caller must
 * check that the result is still valid.
 * The slave lock is held on both entry and exit of this function. */
static int
get_new_stats(struct incr_stats *buf, struct slave_state *sstate, int cmd_id)
{
	/* Process all valid buffers in receive_queue[min..max] */
	int min = sstate->last_processed + 1;
	int max = queue_length - 1;
	if (max < min && cmd_id == sstate->stats_id) return 0;
	sstate->last_processed = max;
	int last_queue_age = queue_age;

	/* It takes time to clear the hash table and merge the stats
	 * so do this unlocked. */
	protocol_unlock();

	double start = time_now();
	double clear_time = 0;

	/* Clear the hash table at a new move; the old paths in
	 * the hash table are now meaningless. */
	if (cmd_id != sstate->stats_id) {
		memset(sstate->stats_htable, 0,
		       (1 << sstate->stats_hbits) * sizeof(sstate->stats_htable[0]));
		sstate->stats_id = cmd_id;
		clear_time = time_now() - start;
	}

	/* Set the bucket counts and update the hash table stats. */
	int bucket_count[MAX_BUCKETS];
	memset(bucket_count, 0, sizeof(bucket_count));

	int nodes_read;
	int merge_count = merge_new_stats(sstate, min, max, bucket_count,
					  &nodes_read, last_queue_age);

	/* Put the best increments in the output buffer. */
	int output_nodes = output_stats(buf, sstate, bucket_count, merge_count);

	char b[BSIZE];
	snprintf(b, sizeof(b), "merged %d..%d %d/%d nodes,"
		 " output %d/%d nodes in %.3fms (clear %.3fms)\n",
		 min, max, merge_count, nodes_read, output_nodes,
		 sstate->max_buf_size / (int)sizeof(*buf),
		 (time_now() - start)*1000, clear_time*1000);
	logline(&sstate->client, "= ", b);

	protocol_lock();

	return output_nodes * sizeof(*buf);
}
/* Allocate the buffers in the merge-specific part of the slave state,
 * and reserve space for a terminator value (see merge_insert_hook). */
static void
merge_state_alloc(struct slave_state *sstate)
{
	sstate->stats_htable = calloc2(1 << sstate->stats_hbits, sizeof(struct incr_stats));
	sstate->merged = malloc2(sstate->max_merged_nodes * sizeof(int));
	sstate->max_buf_size -= sizeof(struct incr_stats);
}
/* Append a terminator value to make merge_new_stats() more
 * efficient. merge_state_alloc() has reserved enough space. */
static void
merge_insert_hook(struct incr_stats *buf, int size)
{
	int nodes = size / sizeof(*buf);
	buf[nodes].coord_path = INT64_MAX;
}
/* Initialize merge-related fields of the default slave state. */
void
merge_init(struct slave_state *sstate, int shared_nodes, int stats_hbits, int max_slaves)
{
	/* See merge_state_alloc() for shared_nodes + 1 */
	sstate->max_buf_size = (shared_nodes + 1) * sizeof(struct incr_stats);
	sstate->stats_hbits = stats_hbits;

	sstate->insert_hook = (buffer_hook)merge_insert_hook;
	sstate->alloc_hook = merge_state_alloc;
	sstate->args_hook = (getargs_hook)get_new_stats;

	/* At worst one late slave thread may have to merge up to
	 * shared_nodes * BUFFERS_PER_SLAVE * (max_slaves - 1)
	 * nodes, but on average it should not have to merge more than
	 * shared_nodes * (max_slaves - 1) nodes.
	 * Restricting the maximum number of merged nodes to the latter
	 * avoids spending excessive time on the merge. */
	sstate->max_merged_nodes = shared_nodes * (max_slaves - 1);
}
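
/* Typical wiring (a sketch; the exact call site lives in the
 * distributed engine setup, and the dist-> field names below are
 * assumptions):
 *
 *   merge_init(&default_sstate, dist->shared_nodes,
 *              dist->stats_hbits, dist->max_slaves);
 *
 * After this, the generic slave-thread machinery calls alloc_hook once
 * per slave state, insert_hook when a received buffer is queued, and
 * args_hook (get_new_stats) when building the reply for its slave. */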