1 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. See the AUTHORS file for names of contributors.
8 #include "db/db_impl.h"
9 #include "db/version_set.h"
10 #include "leveldb/cache.h"
11 #include "leveldb/db.h"
12 #include "leveldb/env.h"
13 #include "leveldb/write_batch.h"
14 #include "port/port.h"
15 #include "util/crc32c.h"
16 #include "util/histogram.h"
17 #include "util/mutexlock.h"
18 #include "util/random.h"
19 #include "util/testutil.h"
21 // Comma-separated list of operations to run in the specified order
23 // fillseq -- write N values in sequential key order in async mode
24 // fillrandom -- write N values in random key order in async mode
25 // overwrite -- overwrite N values in random key order in async mode
26 // fillsync -- write N/100 values in random key order in sync mode
27 // fill100K -- write N/1000 100K values in random order in async mode
28 // deleteseq -- delete N keys in sequential order
29 // deleterandom -- delete N keys in random order
30 // readseq -- read N times sequentially
31 // readreverse -- read N times in reverse order
32 // readrandom -- read N times in random order
33 // readmissing -- read N missing keys in random order
34 // readhot -- read N times in random order from 1% section of DB
35 // seekrandom -- N random seeks
36 // open -- cost of opening a DB
37 // crc32c -- repeated crc32c of 4K of data
38 // acquireload -- load N*1000 times
40 // compact -- Compact the entire DB
41 // stats -- Print DB stats
42 // sstables -- Print sstable info
43 // heapprofile -- Dump a heap profile (if supported by this port)
// Comma-separated list of operations to run in the specified order.
// NOTE(review): the default benchmark list was mangled by extraction and only
// the trailing entry survived -- restore the full default list from pristine
// sources if available.
static const char* FLAGS_benchmarks =
    "readrandom,"  // Extra run to allow previous compactions to quiesce
    ;

// Number of key/values to place in database
static int FLAGS_num = 1000000;

// Number of read operations to do.  If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;

// Number of concurrent threads to run.
static int FLAGS_threads = 1;

// Size of each value
static int FLAGS_value_size = 100;

// Arrange to generate values that shrink to this fraction of
// their original size after compression
static double FLAGS_compression_ratio = 0.5;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;

// Number of bytes written to each file.
// (initialized to default value by "main")
static int FLAGS_max_file_size = 0;

// Approximate size of user data packed per block (before compression).
// (initialized to default value by "main")
static int FLAGS_block_size = 0;

// Number of bytes to use as a cache of uncompressed data.
// Negative means use default settings.
static int FLAGS_cache_size = -1;

// Maximum number of files to keep open at the same time (use default if == 0)
static int FLAGS_open_files = 0;

// Bloom filter bits per key.
// Negative means use default settings.
static int FLAGS_bloom_bits = -1;

// If true, do not destroy the existing database.  If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
static bool FLAGS_use_existing_db = false;

// If true, reuse existing log/MANIFEST files when re-opening a database.
static bool FLAGS_reuse_logs = false;

// Use the db with the following name.  NULL means a default path is chosen
// in main() via GetTestDirectory().
static const char* FLAGS_db = NULL;
120 leveldb::Env
* g_env
= NULL
;
122 // Helper for quickly generating random data.
123 class RandomGenerator
{
130 // We use a limited amount of data over and over again and ensure
131 // that it is larger than the compression window (32KB), and also
132 // large enough to serve all typical value sizes we want to write.
135 while (data_
.size() < 1048576) {
136 // Add a short fragment that is as compressible as specified
137 // by FLAGS_compression_ratio.
138 test::CompressibleString(&rnd
, FLAGS_compression_ratio
, 100, &piece
);
144 Slice
Generate(size_t len
) {
145 if (pos_
+ len
> data_
.size()) {
147 assert(len
< data_
.size());
150 return Slice(data_
.data() + pos_
- len
, len
);
155 static Slice
TrimSpace(Slice s
) {
157 while (start
< s
.size() && isspace(s
[start
])) {
160 size_t limit
= s
.size();
161 while (limit
> start
&& isspace(s
[limit
-1])) {
164 return Slice(s
.data() + start
, limit
- start
);
168 static void AppendWithSpace(std::string
* str
, Slice msg
) {
169 if (msg
.empty()) return;
173 str
->append(msg
.data(), msg
.size());
184 double last_op_finish_
;
186 std::string message_
;
193 last_op_finish_
= start_
;
198 start_
= g_env
->NowMicros();
203 void Merge(const Stats
& other
) {
204 hist_
.Merge(other
.hist_
);
205 done_
+= other
.done_
;
206 bytes_
+= other
.bytes_
;
207 seconds_
+= other
.seconds_
;
208 if (other
.start_
< start_
) start_
= other
.start_
;
209 if (other
.finish_
> finish_
) finish_
= other
.finish_
;
211 // Just keep the messages from one thread
212 if (message_
.empty()) message_
= other
.message_
;
216 finish_
= g_env
->NowMicros();
217 seconds_
= (finish_
- start_
) * 1e-6;
220 void AddMessage(Slice msg
) {
221 AppendWithSpace(&message_
, msg
);
224 void FinishedSingleOp() {
225 if (FLAGS_histogram
) {
226 double now
= g_env
->NowMicros();
227 double micros
= now
- last_op_finish_
;
229 if (micros
> 20000) {
230 fprintf(stderr
, "long op: %.1f micros%30s\r", micros
, "");
233 last_op_finish_
= now
;
237 if (done_
>= next_report_
) {
238 if (next_report_
< 1000) next_report_
+= 100;
239 else if (next_report_
< 5000) next_report_
+= 500;
240 else if (next_report_
< 10000) next_report_
+= 1000;
241 else if (next_report_
< 50000) next_report_
+= 5000;
242 else if (next_report_
< 100000) next_report_
+= 10000;
243 else if (next_report_
< 500000) next_report_
+= 50000;
244 else next_report_
+= 100000;
245 fprintf(stderr
, "... finished %d ops%30s\r", done_
, "");
250 void AddBytes(int64_t n
) {
254 void Report(const Slice
& name
) {
255 // Pretend at least one op was done in case we are running a benchmark
256 // that does not call FinishedSingleOp().
257 if (done_
< 1) done_
= 1;
261 // Rate is computed on actual elapsed time, not the sum of per-thread
263 double elapsed
= (finish_
- start_
) * 1e-6;
265 snprintf(rate
, sizeof(rate
), "%6.1f MB/s",
266 (bytes_
/ 1048576.0) / elapsed
);
269 AppendWithSpace(&extra
, message_
);
271 fprintf(stdout
, "%-12s : %11.3f micros/op;%s%s\n",
272 name
.ToString().c_str(),
273 seconds_
* 1e6
/ done_
,
274 (extra
.empty() ? "" : " "),
276 if (FLAGS_histogram
) {
277 fprintf(stdout
, "Microseconds per op:\n%s\n", hist_
.ToString().c_str());
283 // State shared by all concurrent executions of the same benchmark.
289 // Each thread goes through the following states:
291 // (2) waiting for others to be initialized
299 SharedState() : cv(&mu
) { }
302 // Per-thread state for concurrent executions of the same benchmark.
304 int tid
; // 0..n-1 when running in n threads
305 Random rand
; // Has different seeds for different threads
309 ThreadState(int index
)
320 const FilterPolicy
* filter_policy_
;
324 int entries_per_batch_
;
325 WriteOptions write_options_
;
330 const int kKeySize
= 16;
332 fprintf(stdout
, "Keys: %d bytes each\n", kKeySize
);
333 fprintf(stdout
, "Values: %d bytes each (%d bytes after compression)\n",
335 static_cast<int>(FLAGS_value_size
* FLAGS_compression_ratio
+ 0.5));
336 fprintf(stdout
, "Entries: %d\n", num_
);
337 fprintf(stdout
, "RawSize: %.1f MB (estimated)\n",
338 ((static_cast<int64_t>(kKeySize
+ FLAGS_value_size
) * num_
)
340 fprintf(stdout
, "FileSize: %.1f MB (estimated)\n",
341 (((kKeySize
+ FLAGS_value_size
* FLAGS_compression_ratio
) * num_
)
344 fprintf(stdout
, "------------------------------------------------\n");
347 void PrintWarnings() {
348 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
350 "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
355 "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
358 // See if snappy is working by attempting to compress a compressible string
359 const char text
[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
360 std::string compressed
;
361 if (!port::Snappy_Compress(text
, sizeof(text
), &compressed
)) {
362 fprintf(stdout
, "WARNING: Snappy compression is not enabled\n");
363 } else if (compressed
.size() >= sizeof(text
)) {
364 fprintf(stdout
, "WARNING: Snappy compression is not effective\n");
368 void PrintEnvironment() {
369 fprintf(stderr
, "LevelDB: version %d.%d\n",
370 kMajorVersion
, kMinorVersion
);
373 time_t now
= time(NULL
);
374 fprintf(stderr
, "Date: %s", ctime(&now
)); // ctime() adds newline
376 FILE* cpuinfo
= fopen("/proc/cpuinfo", "r");
377 if (cpuinfo
!= NULL
) {
380 std::string cpu_type
;
381 std::string cache_size
;
382 while (fgets(line
, sizeof(line
), cpuinfo
) != NULL
) {
383 const char* sep
= strchr(line
, ':');
387 Slice key
= TrimSpace(Slice(line
, sep
- 1 - line
));
388 Slice val
= TrimSpace(Slice(sep
+ 1));
389 if (key
== "model name") {
391 cpu_type
= val
.ToString();
392 } else if (key
== "cache size") {
393 cache_size
= val
.ToString();
397 fprintf(stderr
, "CPU: %d * %s\n", num_cpus
, cpu_type
.c_str());
398 fprintf(stderr
, "CPUCache: %s\n", cache_size
.c_str());
405 : cache_(FLAGS_cache_size
>= 0 ? NewLRUCache(FLAGS_cache_size
) : NULL
),
406 filter_policy_(FLAGS_bloom_bits
>= 0
407 ? NewBloomFilterPolicy(FLAGS_bloom_bits
)
411 value_size_(FLAGS_value_size
),
412 entries_per_batch_(1),
413 reads_(FLAGS_reads
< 0 ? FLAGS_num
: FLAGS_reads
),
415 std::vector
<std::string
> files
;
416 g_env
->GetChildren(FLAGS_db
, &files
);
417 for (size_t i
= 0; i
< files
.size(); i
++) {
418 if (Slice(files
[i
]).starts_with("heap-")) {
419 g_env
->DeleteFile(std::string(FLAGS_db
) + "/" + files
[i
]);
422 if (!FLAGS_use_existing_db
) {
423 DestroyDB(FLAGS_db
, Options());
430 delete filter_policy_
;
437 const char* benchmarks
= FLAGS_benchmarks
;
438 while (benchmarks
!= NULL
) {
439 const char* sep
= strchr(benchmarks
, ',');
445 name
= Slice(benchmarks
, sep
- benchmarks
);
446 benchmarks
= sep
+ 1;
449 // Reset parameters that may be overridden below
451 reads_
= (FLAGS_reads
< 0 ? FLAGS_num
: FLAGS_reads
);
452 value_size_
= FLAGS_value_size
;
453 entries_per_batch_
= 1;
454 write_options_
= WriteOptions();
456 void (Benchmark::*method
)(ThreadState
*) = NULL
;
457 bool fresh_db
= false;
458 int num_threads
= FLAGS_threads
;
460 if (name
== Slice("open")) {
461 method
= &Benchmark::OpenBench
;
463 if (num_
< 1) num_
= 1;
464 } else if (name
== Slice("fillseq")) {
466 method
= &Benchmark::WriteSeq
;
467 } else if (name
== Slice("fillbatch")) {
469 entries_per_batch_
= 1000;
470 method
= &Benchmark::WriteSeq
;
471 } else if (name
== Slice("fillrandom")) {
473 method
= &Benchmark::WriteRandom
;
474 } else if (name
== Slice("overwrite")) {
476 method
= &Benchmark::WriteRandom
;
477 } else if (name
== Slice("fillsync")) {
480 write_options_
.sync
= true;
481 method
= &Benchmark::WriteRandom
;
482 } else if (name
== Slice("fill100K")) {
485 value_size_
= 100 * 1000;
486 method
= &Benchmark::WriteRandom
;
487 } else if (name
== Slice("readseq")) {
488 method
= &Benchmark::ReadSequential
;
489 } else if (name
== Slice("readreverse")) {
490 method
= &Benchmark::ReadReverse
;
491 } else if (name
== Slice("readrandom")) {
492 method
= &Benchmark::ReadRandom
;
493 } else if (name
== Slice("readmissing")) {
494 method
= &Benchmark::ReadMissing
;
495 } else if (name
== Slice("seekrandom")) {
496 method
= &Benchmark::SeekRandom
;
497 } else if (name
== Slice("readhot")) {
498 method
= &Benchmark::ReadHot
;
499 } else if (name
== Slice("readrandomsmall")) {
501 method
= &Benchmark::ReadRandom
;
502 } else if (name
== Slice("deleteseq")) {
503 method
= &Benchmark::DeleteSeq
;
504 } else if (name
== Slice("deleterandom")) {
505 method
= &Benchmark::DeleteRandom
;
506 } else if (name
== Slice("readwhilewriting")) {
507 num_threads
++; // Add extra thread for writing
508 method
= &Benchmark::ReadWhileWriting
;
509 } else if (name
== Slice("compact")) {
510 method
= &Benchmark::Compact
;
511 } else if (name
== Slice("crc32c")) {
512 method
= &Benchmark::Crc32c
;
513 } else if (name
== Slice("acquireload")) {
514 method
= &Benchmark::AcquireLoad
;
515 } else if (name
== Slice("snappycomp")) {
516 method
= &Benchmark::SnappyCompress
;
517 } else if (name
== Slice("snappyuncomp")) {
518 method
= &Benchmark::SnappyUncompress
;
519 } else if (name
== Slice("heapprofile")) {
521 } else if (name
== Slice("stats")) {
522 PrintStats("leveldb.stats");
523 } else if (name
== Slice("sstables")) {
524 PrintStats("leveldb.sstables");
526 if (name
!= Slice()) { // No error message for empty name
527 fprintf(stderr
, "unknown benchmark '%s'\n", name
.ToString().c_str());
532 if (FLAGS_use_existing_db
) {
533 fprintf(stdout
, "%-12s : skipped (--use_existing_db is true)\n",
534 name
.ToString().c_str());
539 DestroyDB(FLAGS_db
, Options());
544 if (method
!= NULL
) {
545 RunBenchmark(num_threads
, name
, method
);
555 void (Benchmark::*method
)(ThreadState
*);
558 static void ThreadBody(void* v
) {
559 ThreadArg
* arg
= reinterpret_cast<ThreadArg
*>(v
);
560 SharedState
* shared
= arg
->shared
;
561 ThreadState
* thread
= arg
->thread
;
563 MutexLock
l(&shared
->mu
);
564 shared
->num_initialized
++;
565 if (shared
->num_initialized
>= shared
->total
) {
566 shared
->cv
.SignalAll();
568 while (!shared
->start
) {
573 thread
->stats
.Start();
574 (arg
->bm
->*(arg
->method
))(thread
);
575 thread
->stats
.Stop();
578 MutexLock
l(&shared
->mu
);
580 if (shared
->num_done
>= shared
->total
) {
581 shared
->cv
.SignalAll();
586 void RunBenchmark(int n
, Slice name
,
587 void (Benchmark::*method
)(ThreadState
*)) {
590 shared
.num_initialized
= 0;
592 shared
.start
= false;
594 ThreadArg
* arg
= new ThreadArg
[n
];
595 for (int i
= 0; i
< n
; i
++) {
597 arg
[i
].method
= method
;
598 arg
[i
].shared
= &shared
;
599 arg
[i
].thread
= new ThreadState(i
);
600 arg
[i
].thread
->shared
= &shared
;
601 g_env
->StartThread(ThreadBody
, &arg
[i
]);
605 while (shared
.num_initialized
< n
) {
610 shared
.cv
.SignalAll();
611 while (shared
.num_done
< n
) {
616 for (int i
= 1; i
< n
; i
++) {
617 arg
[0].thread
->stats
.Merge(arg
[i
].thread
->stats
);
619 arg
[0].thread
->stats
.Report(name
);
621 for (int i
= 0; i
< n
; i
++) {
622 delete arg
[i
].thread
;
627 void Crc32c(ThreadState
* thread
) {
628 // Checksum about 500MB of data total
629 const int size
= 4096;
630 const char* label
= "(4K per op)";
631 std::string
data(size
, 'x');
634 while (bytes
< 500 * 1048576) {
635 crc
= crc32c::Value(data
.data(), size
);
636 thread
->stats
.FinishedSingleOp();
639 // Print so result is not dead
640 fprintf(stderr
, "... crc=0x%x\r", static_cast<unsigned int>(crc
));
642 thread
->stats
.AddBytes(bytes
);
643 thread
->stats
.AddMessage(label
);
646 void AcquireLoad(ThreadState
* thread
) {
648 port::AtomicPointer
ap(&dummy
);
651 thread
->stats
.AddMessage("(each op is 1000 loads)");
652 while (count
< 100000) {
653 for (int i
= 0; i
< 1000; i
++) {
654 ptr
= ap
.Acquire_Load();
657 thread
->stats
.FinishedSingleOp();
659 if (ptr
== NULL
) exit(1); // Disable unused variable warning.
662 void SnappyCompress(ThreadState
* thread
) {
664 Slice input
= gen
.Generate(Options().block_size
);
666 int64_t produced
= 0;
668 std::string compressed
;
669 while (ok
&& bytes
< 1024 * 1048576) { // Compress 1G
670 ok
= port::Snappy_Compress(input
.data(), input
.size(), &compressed
);
671 produced
+= compressed
.size();
672 bytes
+= input
.size();
673 thread
->stats
.FinishedSingleOp();
677 thread
->stats
.AddMessage("(snappy failure)");
680 snprintf(buf
, sizeof(buf
), "(output: %.1f%%)",
681 (produced
* 100.0) / bytes
);
682 thread
->stats
.AddMessage(buf
);
683 thread
->stats
.AddBytes(bytes
);
687 void SnappyUncompress(ThreadState
* thread
) {
689 Slice input
= gen
.Generate(Options().block_size
);
690 std::string compressed
;
691 bool ok
= port::Snappy_Compress(input
.data(), input
.size(), &compressed
);
693 char* uncompressed
= new char[input
.size()];
694 while (ok
&& bytes
< 1024 * 1048576) { // Compress 1G
695 ok
= port::Snappy_Uncompress(compressed
.data(), compressed
.size(),
697 bytes
+= input
.size();
698 thread
->stats
.FinishedSingleOp();
700 delete[] uncompressed
;
703 thread
->stats
.AddMessage("(snappy failure)");
705 thread
->stats
.AddBytes(bytes
);
713 options
.create_if_missing
= !FLAGS_use_existing_db
;
714 options
.block_cache
= cache_
;
715 options
.write_buffer_size
= FLAGS_write_buffer_size
;
716 options
.max_file_size
= FLAGS_max_file_size
;
717 options
.block_size
= FLAGS_block_size
;
718 options
.max_open_files
= FLAGS_open_files
;
719 options
.filter_policy
= filter_policy_
;
720 options
.reuse_logs
= FLAGS_reuse_logs
;
721 Status s
= DB::Open(options
, FLAGS_db
, &db_
);
723 fprintf(stderr
, "open error: %s\n", s
.ToString().c_str());
728 void OpenBench(ThreadState
* thread
) {
729 for (int i
= 0; i
< num_
; i
++) {
732 thread
->stats
.FinishedSingleOp();
736 void WriteSeq(ThreadState
* thread
) {
737 DoWrite(thread
, true);
740 void WriteRandom(ThreadState
* thread
) {
741 DoWrite(thread
, false);
744 void DoWrite(ThreadState
* thread
, bool seq
) {
745 if (num_
!= FLAGS_num
) {
747 snprintf(msg
, sizeof(msg
), "(%d ops)", num_
);
748 thread
->stats
.AddMessage(msg
);
755 for (int i
= 0; i
< num_
; i
+= entries_per_batch_
) {
757 for (int j
= 0; j
< entries_per_batch_
; j
++) {
758 const int k
= seq
? i
+j
: (thread
->rand
.Next() % FLAGS_num
);
760 snprintf(key
, sizeof(key
), "%016d", k
);
761 batch
.Put(key
, gen
.Generate(value_size_
));
762 bytes
+= value_size_
+ strlen(key
);
763 thread
->stats
.FinishedSingleOp();
765 s
= db_
->Write(write_options_
, &batch
);
767 fprintf(stderr
, "put error: %s\n", s
.ToString().c_str());
771 thread
->stats
.AddBytes(bytes
);
774 void ReadSequential(ThreadState
* thread
) {
775 Iterator
* iter
= db_
->NewIterator(ReadOptions());
778 for (iter
->SeekToFirst(); i
< reads_
&& iter
->Valid(); iter
->Next()) {
779 bytes
+= iter
->key().size() + iter
->value().size();
780 thread
->stats
.FinishedSingleOp();
784 thread
->stats
.AddBytes(bytes
);
787 void ReadReverse(ThreadState
* thread
) {
788 Iterator
* iter
= db_
->NewIterator(ReadOptions());
791 for (iter
->SeekToLast(); i
< reads_
&& iter
->Valid(); iter
->Prev()) {
792 bytes
+= iter
->key().size() + iter
->value().size();
793 thread
->stats
.FinishedSingleOp();
797 thread
->stats
.AddBytes(bytes
);
800 void ReadRandom(ThreadState
* thread
) {
804 for (int i
= 0; i
< reads_
; i
++) {
806 const int k
= thread
->rand
.Next() % FLAGS_num
;
807 snprintf(key
, sizeof(key
), "%016d", k
);
808 if (db_
->Get(options
, key
, &value
).ok()) {
811 thread
->stats
.FinishedSingleOp();
814 snprintf(msg
, sizeof(msg
), "(%d of %d found)", found
, num_
);
815 thread
->stats
.AddMessage(msg
);
818 void ReadMissing(ThreadState
* thread
) {
821 for (int i
= 0; i
< reads_
; i
++) {
823 const int k
= thread
->rand
.Next() % FLAGS_num
;
824 snprintf(key
, sizeof(key
), "%016d.", k
);
825 db_
->Get(options
, key
, &value
);
826 thread
->stats
.FinishedSingleOp();
830 void ReadHot(ThreadState
* thread
) {
833 const int range
= (FLAGS_num
+ 99) / 100;
834 for (int i
= 0; i
< reads_
; i
++) {
836 const int k
= thread
->rand
.Next() % range
;
837 snprintf(key
, sizeof(key
), "%016d", k
);
838 db_
->Get(options
, key
, &value
);
839 thread
->stats
.FinishedSingleOp();
843 void SeekRandom(ThreadState
* thread
) {
846 for (int i
= 0; i
< reads_
; i
++) {
847 Iterator
* iter
= db_
->NewIterator(options
);
849 const int k
= thread
->rand
.Next() % FLAGS_num
;
850 snprintf(key
, sizeof(key
), "%016d", k
);
852 if (iter
->Valid() && iter
->key() == key
) found
++;
854 thread
->stats
.FinishedSingleOp();
857 snprintf(msg
, sizeof(msg
), "(%d of %d found)", found
, num_
);
858 thread
->stats
.AddMessage(msg
);
861 void DoDelete(ThreadState
* thread
, bool seq
) {
865 for (int i
= 0; i
< num_
; i
+= entries_per_batch_
) {
867 for (int j
= 0; j
< entries_per_batch_
; j
++) {
868 const int k
= seq
? i
+j
: (thread
->rand
.Next() % FLAGS_num
);
870 snprintf(key
, sizeof(key
), "%016d", k
);
872 thread
->stats
.FinishedSingleOp();
874 s
= db_
->Write(write_options_
, &batch
);
876 fprintf(stderr
, "del error: %s\n", s
.ToString().c_str());
882 void DeleteSeq(ThreadState
* thread
) {
883 DoDelete(thread
, true);
886 void DeleteRandom(ThreadState
* thread
) {
887 DoDelete(thread
, false);
890 void ReadWhileWriting(ThreadState
* thread
) {
891 if (thread
->tid
> 0) {
894 // Special thread that keeps writing until other threads are done.
898 MutexLock
l(&thread
->shared
->mu
);
899 if (thread
->shared
->num_done
+ 1 >= thread
->shared
->num_initialized
) {
900 // Other threads have finished
905 const int k
= thread
->rand
.Next() % FLAGS_num
;
907 snprintf(key
, sizeof(key
), "%016d", k
);
908 Status s
= db_
->Put(write_options_
, key
, gen
.Generate(value_size_
));
910 fprintf(stderr
, "put error: %s\n", s
.ToString().c_str());
915 // Do not count any of the preceding work/delay in stats.
916 thread
->stats
.Start();
920 void Compact(ThreadState
* thread
) {
921 db_
->CompactRange(NULL
, NULL
);
924 void PrintStats(const char* key
) {
926 if (!db_
->GetProperty(key
, &stats
)) {
929 fprintf(stdout
, "\n%s\n", stats
.c_str());
932 static void WriteToFile(void* arg
, const char* buf
, int n
) {
933 reinterpret_cast<WritableFile
*>(arg
)->Append(Slice(buf
, n
));
938 snprintf(fname
, sizeof(fname
), "%s/heap-%04d", FLAGS_db
, ++heap_counter_
);
940 Status s
= g_env
->NewWritableFile(fname
, &file
);
942 fprintf(stderr
, "%s\n", s
.ToString().c_str());
945 bool ok
= port::GetHeapProfile(WriteToFile
, file
);
948 fprintf(stderr
, "heap profiling not supported\n");
949 g_env
->DeleteFile(fname
);
954 } // namespace leveldb
956 int main(int argc
, char** argv
) {
957 FLAGS_write_buffer_size
= leveldb::Options().write_buffer_size
;
958 FLAGS_max_file_size
= leveldb::Options().max_file_size
;
959 FLAGS_block_size
= leveldb::Options().block_size
;
960 FLAGS_open_files
= leveldb::Options().max_open_files
;
961 std::string default_db_path
;
963 for (int i
= 1; i
< argc
; i
++) {
967 if (leveldb::Slice(argv
[i
]).starts_with("--benchmarks=")) {
968 FLAGS_benchmarks
= argv
[i
] + strlen("--benchmarks=");
969 } else if (sscanf(argv
[i
], "--compression_ratio=%lf%c", &d
, &junk
) == 1) {
970 FLAGS_compression_ratio
= d
;
971 } else if (sscanf(argv
[i
], "--histogram=%d%c", &n
, &junk
) == 1 &&
972 (n
== 0 || n
== 1)) {
974 } else if (sscanf(argv
[i
], "--use_existing_db=%d%c", &n
, &junk
) == 1 &&
975 (n
== 0 || n
== 1)) {
976 FLAGS_use_existing_db
= n
;
977 } else if (sscanf(argv
[i
], "--reuse_logs=%d%c", &n
, &junk
) == 1 &&
978 (n
== 0 || n
== 1)) {
979 FLAGS_reuse_logs
= n
;
980 } else if (sscanf(argv
[i
], "--num=%d%c", &n
, &junk
) == 1) {
982 } else if (sscanf(argv
[i
], "--reads=%d%c", &n
, &junk
) == 1) {
984 } else if (sscanf(argv
[i
], "--threads=%d%c", &n
, &junk
) == 1) {
986 } else if (sscanf(argv
[i
], "--value_size=%d%c", &n
, &junk
) == 1) {
987 FLAGS_value_size
= n
;
988 } else if (sscanf(argv
[i
], "--write_buffer_size=%d%c", &n
, &junk
) == 1) {
989 FLAGS_write_buffer_size
= n
;
990 } else if (sscanf(argv
[i
], "--max_file_size=%d%c", &n
, &junk
) == 1) {
991 FLAGS_max_file_size
= n
;
992 } else if (sscanf(argv
[i
], "--block_size=%d%c", &n
, &junk
) == 1) {
993 FLAGS_block_size
= n
;
994 } else if (sscanf(argv
[i
], "--cache_size=%d%c", &n
, &junk
) == 1) {
995 FLAGS_cache_size
= n
;
996 } else if (sscanf(argv
[i
], "--bloom_bits=%d%c", &n
, &junk
) == 1) {
997 FLAGS_bloom_bits
= n
;
998 } else if (sscanf(argv
[i
], "--open_files=%d%c", &n
, &junk
) == 1) {
999 FLAGS_open_files
= n
;
1000 } else if (strncmp(argv
[i
], "--db=", 5) == 0) {
1001 FLAGS_db
= argv
[i
] + 5;
1003 fprintf(stderr
, "Invalid flag '%s'\n", argv
[i
]);
1008 leveldb::g_env
= leveldb::Env::Default();
1010 // Choose a location for the test database if none given with --db=<path>
1011 if (FLAGS_db
== NULL
) {
1012 leveldb::g_env
->GetTestDirectory(&default_db_path
);
1013 default_db_path
+= "/dbbench";
1014 FLAGS_db
= default_db_path
.c_str();
1017 leveldb::Benchmark benchmark
;