#include "cache.h"
#include "parse-options.h"

static int single;
static int multi;
static int count = 1;
static int dump;
static int perf;
static int analyze;
static int analyze_step;

/*
 * Dump the contents of the "dir" and "name" hash tables to stdout.
 * If you sort the result, you can compare it with the other type
 * mode and verify that both single and multi produce the same set.
 */
static void dump_run(void)
{
	struct hashmap_iter iter_dir;
	struct hashmap_iter iter_cache;

	/* Stolen from name-hash.c */
	struct dir_entry {
		struct hashmap_entry ent;
		struct dir_entry *parent;
		int nr;
		unsigned int namelen;
		char name[FLEX_ARRAY];
	};

	struct dir_entry *dir;
	struct cache_entry *ce;

	read_cache();

	if (single) {
		test_lazy_init_name_hash(&the_index, 0);
	} else {
		int nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
		if (!nr_threads_used)
			die("non-threaded code path used");
	}

	dir = hashmap_iter_first(&the_index.dir_hash, &iter_dir);
	while (dir) {
		printf("dir %08x %7d %s\n", dir->ent.hash, dir->nr, dir->name);
		dir = hashmap_iter_next(&iter_dir);
	}

	ce = hashmap_iter_first(&the_index.name_hash, &iter_cache);
	while (ce) {
		printf("name %08x %s\n", ce->ent.hash, ce->name);
		ce = hashmap_iter_next(&iter_cache);
	}

	discard_cache();
}
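
/*
 * For example, the comparison described above can be done with a shell
 * pipeline along these lines (output file names are only illustrative):
 *
 *	test-lazy-init-name-hash -d -s | sort >single.out
 *	test-lazy-init-name-hash -d -m | sort >multi.out
 *	diff single.out multi.out
 */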

/*
 * Run the single or multi threaded version "count" times and
 * report on the time taken.
 */
static uint64_t time_runs(int try_threaded)
{
	uint64_t t0, t1, t2;
	uint64_t sum = 0;
	uint64_t avg;
	int nr_threads_used;
	int i;

	for (i = 0; i < count; i++) {
		t0 = getnanotime();
		read_cache();
		t1 = getnanotime();
		nr_threads_used = test_lazy_init_name_hash(&the_index, try_threaded);
		t2 = getnanotime();

		sum += (t2 - t1);

		if (try_threaded && !nr_threads_used)
			die("non-threaded code path used");

		if (nr_threads_used)
			printf("%f %f %d multi %d\n",
				((double)(t1 - t0))/1000000000,
				((double)(t2 - t1))/1000000000,
				the_index.cache_nr,
				nr_threads_used);
		else
			printf("%f %f %d single\n",
				((double)(t1 - t0))/1000000000,
				((double)(t2 - t1))/1000000000,
				the_index.cache_nr);
		fflush(stdout);

		discard_cache();
	}

	avg = sum / count;
	if (count > 1)
		printf("avg %f %s\n",
			(double)avg/1000000000,
			(try_threaded) ? "multi" : "single");

	return avg;
}
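
/*
 * Each pass above prints two elapsed times in seconds (the read_cache()
 * call and the lazy-init call), the cache_nr, and whether the single or
 * multi code path was taken (plus the thread count for multi); only the
 * lazy-init time is accumulated into the reported average.
 */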

/*
 * Try a series of runs varying the "istate->cache_nr" and
 * try to find a good value for the multi-threaded criteria.
 */
static void analyze_run(void)
{
	uint64_t t1s, t1m, t2s, t2m;
	int cache_nr_limit;
	int nr_threads_used = 0;
	int i;
	int nr;

	read_cache();
	cache_nr_limit = the_index.cache_nr;

	nr = analyze;
	while (1) {
		uint64_t sum_single = 0;
		uint64_t sum_multi = 0;
		uint64_t avg_single;
		uint64_t avg_multi;

		if (nr > cache_nr_limit)
			nr = cache_nr_limit;

		for (i = 0; i < count; i++) {
			read_cache();
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1s = getnanotime();
			test_lazy_init_name_hash(&the_index, 0);
			t2s = getnanotime();
			sum_single += (t2s - t1s);
			the_index.cache_nr = cache_nr_limit;
			discard_cache();

			read_cache();
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1m = getnanotime();
			nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
			t2m = getnanotime();
			sum_multi += (t2m - t1m);
			the_index.cache_nr = cache_nr_limit;
			discard_cache();

			if (!nr_threads_used)
				printf("    [size %8d] [single %f]   non-threaded code path used\n",
					nr, ((double)(t2s - t1s))/1000000000);
			else
				printf("    [size %8d] [single %f] %c [multi %f %d]\n",
					nr,
					((double)(t2s - t1s))/1000000000,
					(((t2s - t1s) < (t2m - t1m)) ? '<' : '>'),
					((double)(t2m - t1m))/1000000000,
					nr_threads_used);
			fflush(stdout);
		}

		avg_single = sum_single / count;
		avg_multi = sum_multi / count;
		if (!nr_threads_used)
			printf("avg [size %8d] [single %f]\n",
				nr,
				(double)avg_single/1000000000);
		else
			printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
				nr,
				(double)avg_single/1000000000,
				(avg_single < avg_multi ? '<' : '>'),
				(double)avg_multi/1000000000,
				nr_threads_used);

		if (nr >= cache_nr_limit)
			return;
		nr += analyze_step;
	}
}
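
/*
 * In the per-size output above, '<' means the single-threaded run was
 * faster than the multi-threaded run for that index size and '>' means
 * the opposite; the size at which the "avg" rows flip from '<' to '>'
 * is a candidate value for the multi-threaded threshold.
 */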

int cmd_main(int argc, const char **argv)
{
	const char *usage[] = {
		"test-lazy-init-name-hash -d (-s | -m)",
		"test-lazy-init-name-hash -p [-c c]",
		"test-lazy-init-name-hash -a a [--step s] [-c c]",
		"test-lazy-init-name-hash (-s | -m) [-c c]",
		"test-lazy-init-name-hash -s -m [-c c]",
		NULL
	};
	struct option options[] = {
		OPT_BOOL('s', "single", &single, "run single-threaded code"),
		OPT_BOOL('m', "multi", &multi, "run multi-threaded code"),
		OPT_INTEGER('c', "count", &count, "number of passes"),
		OPT_BOOL('d', "dump", &dump, "dump hash tables"),
		OPT_BOOL('p', "perf", &perf, "compare single vs multi"),
		OPT_INTEGER('a', "analyze", &analyze, "analyze different multi sizes"),
		OPT_INTEGER(0, "step", &analyze_step, "analyze step factor"),
		OPT_END(),
	};
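
	/*
	 * For example (values are only illustrative), "-a 4000 --step 2000 -c 3"
	 * times both code paths at cache_nr = 4000, 6000, 8000, ... up to the
	 * full index size, averaging 3 passes per size, while "-p" compares one
	 * averaged single-threaded run against one averaged multi-threaded run.
	 */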

	const char *prefix;
	uint64_t avg_single, avg_multi;

	prefix = setup_git_directory();

	argc = parse_options(argc, argv, prefix, options, usage, 0);

	/*
	 * istate->dir_hash is only created when ignore_case is set.
	 */
	ignore_case = 1;

	if (dump) {
		if (perf || analyze > 0)
			die("cannot combine dump, perf, or analyze");
		if (count > 1)
			die("count not valid with dump");
		if (single && multi)
			die("cannot use both single and multi with dump");
		if (!single && !multi)
			die("dump requires either single or multi");
		dump_run();
		return 0;
	}

	if (perf) {
		if (analyze > 0)
			die("cannot combine dump, perf, or analyze");
		if (single || multi)
			die("cannot use single or multi with perf");
		avg_single = time_runs(0);
		avg_multi = time_runs(1);
		if (avg_multi > avg_single)
			die("multi is slower");
		return 0;
	}

	if (analyze) {
		if (analyze < 500)
			die("analyze must be at least 500");
		if (!analyze_step)
			analyze_step = analyze;
		if (single || multi)
			die("cannot use single or multi with analyze");
		analyze_run();
		return 0;
	}

	if (!single && !multi)
		die("require either -s or -m or both");

	if (single)
		avg_single = time_runs(0);
	if (multi)
		avg_multi = time_runs(1);

	return 0;
}