lib/tdb2/summary.c
/*
   Trivial Database 2: human-readable summary code
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/tally/tally.h>
#define SUMMARY_FORMAT \
        "Size of file/data: %zu/%zu\n" \
        "Number of records: %zu\n" \
        "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \
        "Smallest/average/largest data: %zu/%zu/%zu\n%s" \
        "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \
        "Number of free records: %zu\n" \
        "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \
        "Number of uncoalesced records: %zu\n" \
        "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \
        "Toplevel hash used: %u of %u\n" \
        "Number of chains: %zu\n" \
        "Number of subhashes: %zu\n" \
        "Smallest/average/largest subhash entries: %zu/%zu/%zu\n%s" \
        "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"

#define BUCKET_SUMMARY_FORMAT_A \
        "Free bucket %zu: total entries %zu.\n" \
        "Smallest/average/largest length: %zu/%zu/%zu\n%s"
#define BUCKET_SUMMARY_FORMAT_B \
        "Free bucket %zu-%zu: total entries %zu.\n" \
        "Smallest/average/largest length: %zu/%zu/%zu\n%s"

#define CAPABILITY_FORMAT \
        "Capability %llu%s\n"

#define HISTO_WIDTH 70
#define HISTO_HEIGHT 20
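/* Count how many of the 2^bits buckets in the hash table at hash_off are
 * occupied (non-zero).  Returns the count, or an error encoded as an offset. */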
static tdb_off_t count_hash(struct tdb_context *tdb,
                            tdb_off_t hash_off, unsigned bits)
{
        const tdb_off_t *h;
        tdb_off_t count = 0;
        unsigned int i;

        h = tdb_access_read(tdb, hash_off, sizeof(*h) << bits, true);
        if (TDB_PTR_IS_ERR(h)) {
                return TDB_ERR_TO_OFF(TDB_PTR_ERR(h));
        }
        for (i = 0; i < (1 << bits); i++)
                count += (h[i] != 0);
        tdb_access_release(tdb, h);

        return count;
}
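/* Walk every record between the header and the end of the file, classify it
 * by its magic (recovery, free, used, hash table, free table or chain) and
 * feed its sizes into the corresponding tallies.  Runs of adjacent free
 * records are recorded in "uncoal" as uncoalesced runs. */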
static enum TDB_ERROR summarize(struct tdb_context *tdb,
                                struct tally *hashes,
                                struct tally *ftables,
                                struct tally *fr,
                                struct tally *keys,
                                struct tally *data,
                                struct tally *extra,
                                struct tally *uncoal,
                                struct tally *chains)
{
        tdb_off_t off;
        tdb_len_t len;
        tdb_len_t unc = 0;

        for (off = sizeof(struct tdb_header);
             off < tdb->file->map_size;
             off += len) {
                const union {
                        struct tdb_used_record u;
                        struct tdb_free_record f;
                        struct tdb_recovery_record r;
                } *p;
                /* We might not be able to get the whole thing. */
                p = tdb_access_read(tdb, off, sizeof(p->f), true);
                if (TDB_PTR_IS_ERR(p)) {
                        return TDB_PTR_ERR(p);
                }
                if (frec_magic(&p->f) != TDB_FREE_MAGIC) {
                        if (unc > 1) {
                                tally_add(uncoal, unc);
                                unc = 0;
                        }
                }

                if (p->r.magic == TDB_RECOVERY_INVALID_MAGIC
                    || p->r.magic == TDB_RECOVERY_MAGIC) {
                        len = sizeof(p->r) + p->r.max_len;
                } else if (frec_magic(&p->f) == TDB_FREE_MAGIC) {
                        len = frec_len(&p->f);
                        tally_add(fr, len);
                        len += sizeof(p->u);
                        unc++;
                } else if (rec_magic(&p->u) == TDB_USED_MAGIC) {
                        len = sizeof(p->u)
                                + rec_key_length(&p->u)
                                + rec_data_length(&p->u)
                                + rec_extra_padding(&p->u);

                        tally_add(keys, rec_key_length(&p->u));
                        tally_add(data, rec_data_length(&p->u));
                        tally_add(extra, rec_extra_padding(&p->u));
                } else if (rec_magic(&p->u) == TDB_HTABLE_MAGIC) {
                        tdb_off_t count = count_hash(tdb,
                                                     off + sizeof(p->u),
                                                     TDB_SUBLEVEL_HASH_BITS);
                        if (TDB_OFF_IS_ERR(count)) {
                                return TDB_OFF_TO_ERR(count);
                        }
                        tally_add(hashes, count);
                        tally_add(extra, rec_extra_padding(&p->u));
                        len = sizeof(p->u)
                                + rec_data_length(&p->u)
                                + rec_extra_padding(&p->u);
                } else if (rec_magic(&p->u) == TDB_FTABLE_MAGIC) {
                        len = sizeof(p->u)
                                + rec_data_length(&p->u)
                                + rec_extra_padding(&p->u);
                        tally_add(ftables, rec_data_length(&p->u));
                        tally_add(extra, rec_extra_padding(&p->u));
                } else if (rec_magic(&p->u) == TDB_CHAIN_MAGIC) {
                        len = sizeof(p->u)
                                + rec_data_length(&p->u)
                                + rec_extra_padding(&p->u);
                        tally_add(chains, 1);
                        tally_add(extra, rec_extra_padding(&p->u));
                } else {
                        len = dead_space(tdb, off);
                        if (TDB_OFF_IS_ERR(len)) {
                                return TDB_OFF_TO_ERR(len);
                        }
                }
                tdb_access_release(tdb, p);
        }
        if (unc)
                tally_add(uncoal, unc);
        return TDB_SUCCESS;
}
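/* Follow the capability list hanging off the header and return how many
 * entries it contains; used to size the summary buffer. */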
static size_t num_capabilities(struct tdb_context *tdb)
{
        tdb_off_t off, next;
        const struct tdb_capability *cap;
        size_t count = 0;

        off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
        if (TDB_OFF_IS_ERR(off))
                return count;

        /* Count capability list. */
        for (; off; off = next) {
                cap = tdb_access_read(tdb, off, sizeof(*cap), true);
                if (TDB_PTR_IS_ERR(cap)) {
                        break;
                }
                count++;
                next = cap->next;
                tdb_access_release(tdb, cap);
        }
        return count;
}
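/* Append one CAPABILITY_FORMAT line per capability to the end of the summary
 * string, noting whether the capability makes the file unopenable, read-only
 * and/or uncheckable for versions that do not understand it. */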
static void add_capabilities(struct tdb_context *tdb, size_t num, char *summary)
{
        tdb_off_t off, next;
        const struct tdb_capability *cap;
        size_t count = 0;

        /* Append to summary. */
        summary += strlen(summary);

        off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
        if (TDB_OFF_IS_ERR(off))
                return;

        /* Walk capability list. */
        for (; off; off = next) {
                cap = tdb_access_read(tdb, off, sizeof(*cap), true);
                if (TDB_PTR_IS_ERR(cap)) {
                        break;
                }
                count++;
                sprintf(summary, CAPABILITY_FORMAT,
                        cap->type & TDB_CAP_TYPE_MASK,
                        /* Noopen?  How did we get here? */
                        (cap->type & TDB_CAP_NOOPEN) ? " (unopenable)"
                        : ((cap->type & TDB_CAP_NOWRITE)
                           && (cap->type & TDB_CAP_NOCHECK)) ? " (uncheckable,read-only)"
                        : (cap->type & TDB_CAP_NOWRITE) ? " (read-only)"
                        : (cap->type & TDB_CAP_NOCHECK) ? " (uncheckable)"
                        : "");
                summary += strlen(summary);
                next = cap->next;
                tdb_access_release(tdb, cap);
        }
}
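/* Public entry point: produce a malloc'ed, human-readable summary of the
 * database in *summary (the caller frees it).  TDB1-format files are handed
 * off to tdb1_summary(); otherwise the whole file is read-locked, tallied by
 * summarize(), and optionally decorated with histograms. */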
enum TDB_ERROR tdb_summary(struct tdb_context *tdb,
                           enum tdb_summary_flags flags,
                           char **summary)
{
        tdb_len_t len;
        size_t num_caps;
        struct tally *ftables, *hashes, *freet, *keys, *data, *extra, *uncoal,
                *chains;
        char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_VERSION1) {
                /* tdb1 doesn't do graphs. */
                *summary = tdb1_summary(tdb);
                if (!*summary)
                        return tdb->last_error;
                return TDB_SUCCESS;
        }

        hashesg = freeg = keysg = datag = extrag = uncoalg = NULL;

        ecode = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
        if (ecode != TDB_SUCCESS) {
                return tdb->last_error = ecode;
        }

        ecode = tdb_lock_expand(tdb, F_RDLCK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_RDLCK);
                return tdb->last_error = ecode;
        }

        /* Start stats off empty. */
        ftables = tally_new(HISTO_HEIGHT);
        hashes = tally_new(HISTO_HEIGHT);
        freet = tally_new(HISTO_HEIGHT);
        keys = tally_new(HISTO_HEIGHT);
        data = tally_new(HISTO_HEIGHT);
        extra = tally_new(HISTO_HEIGHT);
        uncoal = tally_new(HISTO_HEIGHT);
        chains = tally_new(HISTO_HEIGHT);
        if (!ftables || !hashes || !freet || !keys || !data || !extra
            || !uncoal || !chains) {
                ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                   "tdb_summary: failed to allocate"
                                   " tally structures");
                goto unlock;
        }

        ecode = summarize(tdb, hashes, ftables, freet, keys, data, extra,
                          uncoal, chains);
        if (ecode != TDB_SUCCESS) {
                goto unlock;
        }
        if (flags & TDB_SUMMARY_HISTOGRAMS) {
                hashesg = tally_histogram(hashes, HISTO_WIDTH, HISTO_HEIGHT);
                freeg = tally_histogram(freet, HISTO_WIDTH, HISTO_HEIGHT);
                keysg = tally_histogram(keys, HISTO_WIDTH, HISTO_HEIGHT);
                datag = tally_histogram(data, HISTO_WIDTH, HISTO_HEIGHT);
                extrag = tally_histogram(extra, HISTO_WIDTH, HISTO_HEIGHT);
                uncoalg = tally_histogram(uncoal, HISTO_WIDTH, HISTO_HEIGHT);
        }

        num_caps = num_capabilities(tdb);

        /* 20 is max length of a %llu. */
        len = strlen(SUMMARY_FORMAT) + 33*20 + 1
                + (hashesg ? strlen(hashesg) : 0)
                + (freeg ? strlen(freeg) : 0)
                + (keysg ? strlen(keysg) : 0)
                + (datag ? strlen(datag) : 0)
                + (extrag ? strlen(extrag) : 0)
                + (uncoalg ? strlen(uncoalg) : 0)
                + num_caps * (strlen(CAPABILITY_FORMAT) + 20*4);

        *summary = malloc(len);
        if (!*summary) {
                ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                   "tdb_summary: failed to allocate string");
                goto unlock;
        }
        sprintf(*summary, SUMMARY_FORMAT,
                (size_t)tdb->file->map_size,
                tally_total(keys, NULL) + tally_total(data, NULL),
                tally_num(keys),
                tally_min(keys), tally_mean(keys), tally_max(keys),
                keysg ? keysg : "",
                tally_min(data), tally_mean(data), tally_max(data),
                datag ? datag : "",
                tally_min(extra), tally_mean(extra), tally_max(extra),
                extrag ? extrag : "",
                tally_num(freet),
                tally_min(freet), tally_mean(freet), tally_max(freet),
                freeg ? freeg : "",
                tally_total(uncoal, NULL),
                tally_min(uncoal), tally_mean(uncoal), tally_max(uncoal),
                uncoalg ? uncoalg : "",
                (unsigned)count_hash(tdb, offsetof(struct tdb_header,
                                                   hashtable),
                                     TDB_TOPLEVEL_HASH_BITS),
                1 << TDB_TOPLEVEL_HASH_BITS,
                tally_num(chains),
                tally_num(hashes),
                tally_min(hashes), tally_mean(hashes), tally_max(hashes),
                hashesg ? hashesg : "",
                tally_total(keys, NULL) * 100.0 / tdb->file->map_size,
                tally_total(data, NULL) * 100.0 / tdb->file->map_size,
                tally_total(extra, NULL) * 100.0 / tdb->file->map_size,
                tally_total(freet, NULL) * 100.0 / tdb->file->map_size,
                (tally_num(keys) + tally_num(freet) + tally_num(hashes))
                * sizeof(struct tdb_used_record) * 100.0 / tdb->file->map_size,
                tally_num(ftables) * sizeof(struct tdb_freetable)
                * 100.0 / tdb->file->map_size,
                (tally_num(hashes)
                 * (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS)
                 + (sizeof(tdb_off_t) << TDB_TOPLEVEL_HASH_BITS)
                 + sizeof(struct tdb_chain) * tally_num(chains))
                * 100.0 / tdb->file->map_size);
        add_capabilities(tdb, num_caps, *summary);

unlock:
        free(hashesg);
        free(freeg);
        free(keysg);
        free(datag);
        free(extrag);
        free(uncoalg);
        free(hashes);
        free(freet);
        free(keys);
        free(data);
        free(extra);
        free(uncoal);
        free(ftables);
        free(chains);

        tdb_allrecord_unlock(tdb, F_RDLCK);
        tdb_unlock_expand(tdb, F_RDLCK);

        return tdb->last_error = ecode;
}
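/*
 * Example (not part of the original file): a minimal sketch of how a caller
 * might use tdb_summary(), assuming the public tdb2 API (tdb_open/tdb_close)
 * and an existing database file named "example.tdb".  Error handling is
 * abbreviated.
 *
 *      #include "tdb2.h"
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *
 *      int main(void)
 *      {
 *              struct tdb_context *tdb;
 *              char *summary;
 *
 *              tdb = tdb_open("example.tdb", TDB_DEFAULT, O_RDWR, 0600, NULL);
 *              if (!tdb)
 *                      return 1;
 *              if (tdb_summary(tdb, TDB_SUMMARY_HISTOGRAMS, &summary)
 *                  == TDB_SUCCESS) {
 *                      printf("%s", summary);
 *                      free(summary);
 *              }
 *              tdb_close(tdb);
 *              return 0;
 *      }
 */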