1 #define USE_THE_INDEX_VARIABLE
4 #include "parse-options.h"
12 static int analyze_step
;
15 * Dump the contents of the "dir" and "name" hash tables to stdout.
16 * If you sort the result, you can compare it with the other type
17 * mode and verify that both single and multi produce the same set.
19 static void dump_run(void)
21 struct hashmap_iter iter_dir
;
22 struct hashmap_iter iter_cache
;
24 /* Stolen from name-hash.c */
26 struct hashmap_entry ent
;
27 struct dir_entry
*parent
;
30 char name
[FLEX_ARRAY
];
33 struct dir_entry
*dir
;
34 struct cache_entry
*ce
;
36 repo_read_index(the_repository
);
38 test_lazy_init_name_hash(&the_index
, 0);
40 int nr_threads_used
= test_lazy_init_name_hash(&the_index
, 1);
42 die("non-threaded code path used");
45 hashmap_for_each_entry(&the_index
.dir_hash
, &iter_dir
, dir
,
46 ent
/* member name */)
47 printf("dir %08x %7d %s\n", dir
->ent
.hash
, dir
->nr
, dir
->name
);
49 hashmap_for_each_entry(&the_index
.name_hash
, &iter_cache
, ce
,
50 ent
/* member name */)
51 printf("name %08x %s\n", ce
->ent
.hash
, ce
->name
);
53 discard_index(&the_index
);
57 * Run the single or multi threaded version "count" times and
58 * report on the time taken.
60 static uint64_t time_runs(int try_threaded
)
68 for (i
= 0; i
< count
; i
++) {
70 repo_read_index(the_repository
);
72 nr_threads_used
= test_lazy_init_name_hash(&the_index
, try_threaded
);
77 if (try_threaded
&& !nr_threads_used
)
78 die("non-threaded code path used");
81 printf("%f %f %d multi %d\n",
82 ((double)(t1
- t0
))/1000000000,
83 ((double)(t2
- t1
))/1000000000,
87 printf("%f %f %d single\n",
88 ((double)(t1
- t0
))/1000000000,
89 ((double)(t2
- t1
))/1000000000,
93 discard_index(&the_index
);
99 (double)avg
/1000000000,
100 (try_threaded
) ? "multi" : "single");
106 * Try a series of runs varying the "istate->cache_nr" and
107 * try to find a good value for the multi-threaded criteria.
109 static void analyze_run(void)
111 uint64_t t1s
, t1m
, t2s
, t2m
;
113 int nr_threads_used
= 0;
117 repo_read_index(the_repository
);
118 cache_nr_limit
= the_index
.cache_nr
;
119 discard_index(&the_index
);
123 uint64_t sum_single
= 0;
124 uint64_t sum_multi
= 0;
128 if (nr
> cache_nr_limit
)
131 for (i
= 0; i
< count
; i
++) {
132 repo_read_index(the_repository
);
133 the_index
.cache_nr
= nr
; /* cheap truncate of index */
135 test_lazy_init_name_hash(&the_index
, 0);
137 sum_single
+= (t2s
- t1s
);
138 the_index
.cache_nr
= cache_nr_limit
;
139 discard_index(&the_index
);
141 repo_read_index(the_repository
);
142 the_index
.cache_nr
= nr
; /* cheap truncate of index */
144 nr_threads_used
= test_lazy_init_name_hash(&the_index
, 1);
146 sum_multi
+= (t2m
- t1m
);
147 the_index
.cache_nr
= cache_nr_limit
;
148 discard_index(&the_index
);
150 if (!nr_threads_used
)
151 printf(" [size %8d] [single %f] non-threaded code path used\n",
152 nr
, ((double)(t2s
- t1s
))/1000000000);
154 printf(" [size %8d] [single %f] %c [multi %f %d]\n",
156 ((double)(t2s
- t1s
))/1000000000,
157 (((t2s
- t1s
) < (t2m
- t1m
)) ? '<' : '>'),
158 ((double)(t2m
- t1m
))/1000000000,
163 avg_single
= sum_single
/ count
;
164 avg_multi
= sum_multi
/ count
;
165 if (!nr_threads_used
)
166 printf("avg [size %8d] [single %f]\n",
168 (double)avg_single
/1000000000);
170 printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
172 (double)avg_single
/1000000000,
173 (avg_single
< avg_multi
? '<' : '>'),
174 (double)avg_multi
/1000000000,
179 if (nr
>= cache_nr_limit
)
/*
 * Command entry point: parse the options and dispatch to the dump /
 * perf / analyze modes or to plain single/multi timing runs.
 *
 * NOTE(review): this listing is a garbled extraction -- original line
 * numbers are fused into the text, statements are split across lines,
 * and some lines (including the end of this function) are missing.
 * The comments below annotate the visible fragments without altering them.
 */
185 int cmd__lazy_init_name_hash(int argc
, const char **argv
)
/* Usage strings shown by parse_options() on error or with -h. */
187 const char *usage
[] = {
188 "test-tool lazy-init-name-hash -d (-s | -m)",
189 "test-tool lazy-init-name-hash -p [-c c]",
190 "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
191 "test-tool lazy-init-name-hash (-s | -m) [-c c]",
192 "test-tool lazy-init-name-hash -s -m [-c c]",
/* Option table wiring the flags onto the file-scope statics. */
195 struct option options
[] = {
196 OPT_BOOL('s', "single", &single
, "run single-threaded code"),
197 OPT_BOOL('m', "multi", &multi
, "run multi-threaded code"),
198 OPT_INTEGER('c', "count", &count
, "number of passes"),
199 OPT_BOOL('d', "dump", &dump
, "dump hash tables"),
200 OPT_BOOL('p', "perf", &perf
, "compare single vs multi"),
201 OPT_INTEGER('a', "analyze", &analyze
, "analyze different multi sizes"),
202 OPT_INTEGER(0, "step", &analyze_step
, "analyze step factor"),
/* Averages (in nanoseconds) returned by time_runs() below. */
206 uint64_t avg_single
, avg_multi
;
/* Must run inside a repository; find it and get the prefix. */
208 prefix
= setup_git_directory();
210 argc
= parse_options(argc
, argv
, prefix
, options
, usage
, 0);
/* (fragment of a comment explaining why ignore_case matters for dir_hash) */
213 * istate->dir_hash is only created when ignore_case is set.
/* Mode validation: dump / perf / analyze are mutually exclusive. */
218 if (perf
|| analyze
> 0)
219 die("cannot combine dump, perf, or analyze");
221 die("count not valid with dump");
223 die("cannot use both single and multi with dump");
224 if (!single
&& !multi
)
225 die("dump requires either single or multi");
232 die("cannot combine dump, perf, or analyze");
234 die("cannot use single or multi with perf");
/* Perf mode: time both paths and insist multi is not slower. */
235 avg_single
= time_runs(0);
236 avg_multi
= time_runs(1);
237 if (avg_multi
> avg_single
)
238 die("multi is slower");
/* Analyze mode: step defaults to the starting size when not given. */
244 die("analyze must be at least 500");
246 analyze_step
= analyze
;
248 die("cannot use single or multi with analyze");
/* Plain timing mode requires at least one of -s / -m. */
253 if (!single
&& !multi
)
254 die("require either -s or -m or both");