
Commit bc14aef

List nanoseconds per operation, instead of millions of operations per second
1 parent 8b45425 commit bc14aef
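
This is purely a reporting change: the two metrics are reciprocals of each other up to a factor of 1000 (1 second = 10^9 ns, 1 million = 10^6 operations), so X million operations per second corresponds to 1000 / X nanoseconds per operation. A minimal, stand-alone sketch of that conversion follows; the function names are illustrative and not part of the benchmark code.

#include <cstdio>

// Convert a throughput in millions of operations per second into a cost in
// nanoseconds per operation, and back. ns/op = 1000 / (Mops/s).
double mops_to_nanos(double mops_per_sec) { return 1000.0 / mops_per_sec; }
double nanos_to_mops(double nanos_per_op) { return 1000.0 / nanos_per_op; }

int main() {
  // e.g. 50 million adds per second is the same speed as 20 ns per add.
  std::printf("50 Mops/s = %.2f ns/op\n", mops_to_nanos(50.0));   // prints 20.00
  std::printf("20 ns/op  = %.2f Mops/s\n", nanos_to_mops(20.0));  // prints 50.00
  return 0;
}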

File tree

1 file changed: +12 -12 lines changed

benchmarks/bulk-insert-and-query.cc

Lines changed: 12 additions & 12 deletions
@@ -59,8 +59,8 @@ const size_t SAMPLE_SIZE = 10 * 1000 * 1000;
 // The statistics gathered for each table type:
 struct Statistics {
   size_t add_count;
-  double adds_per_nano;
-  map<int, double> finds_per_nano; // The key is the percent of queries that were expected
+  double nanos_per_add;
+  map<int, double> nanos_per_finds; // The key is the percent of queries that were expected
                                     // to be positive
   double false_positive_probabilty;
   double bits_per_item;
@@ -84,15 +84,15 @@ string StatisticsTableHeader(int type_width, int find_percent_count) {
   ostringstream os;
 
   os << string(type_width, ' ');
-  os << setw(12) << right << "million";
+  os << setw(12) << right << "";
   for (int i = 0; i < find_percent_count; ++i) {
     os << setw(8) << "find";
   }
-  os << setw(8) << "" << setw(11) << "" << setw(11)
+  os << setw(9) << "" << setw(11) << "" << setw(11)
      << "optimal" << setw(8) << "wasted" << setw(8) << "million" << endl;
 
   os << string(type_width, ' ');
-  os << setw(12) << right << "adds/sec";
+  os << setw(12) << right << "ns/add";
   for (int i = 0; i < find_percent_count; ++i) {
     os << setw(7)
        << static_cast<int>(100 * i / static_cast<double>(find_percent_count - 1)) << '%';
@@ -106,11 +106,10 @@ string StatisticsTableHeader(int type_width, int find_percent_count) {
 template <class CharT, class Traits>
 basic_ostream<CharT, Traits>& operator<<(
     basic_ostream<CharT, Traits>& os, const Statistics& stats) {
-  constexpr double NANOS_PER_MILLION = 1000;
   os << fixed << setprecision(2) << setw(12) << right
-     << stats.adds_per_nano * NANOS_PER_MILLION;
-  for (const auto& fps : stats.finds_per_nano) {
-    os << setw(8) << fps.second * NANOS_PER_MILLION;
+     << stats.nanos_per_add;
+  for (const auto& fps : stats.nanos_per_finds) {
+    os << setw(8) << fps.second;
   }
   // we get some nonsensical result for very small fpps
   if(stats.false_positive_probabilty > 0.0000001) {
@@ -435,8 +434,9 @@ Statistics FilterBenchmark(
   for (size_t added = 0; added < add_count; ++added) {
     assert(FilterAPI<Table>::Contain(to_add[added], &filter) == 1);
   }
+  auto time = NowNanos() - start_time;
   result.add_count = add_count;
-  result.adds_per_nano = add_count / static_cast<double>(NowNanos() - start_time);
+  result.nanos_per_add = static_cast<double>(time) / add_count;
   result.bits_per_item = static_cast<double>(CHAR_BIT * filter.SizeInBytes()) / add_count;
   ::std::random_device random;
   size_t found_count = 0;
@@ -495,8 +495,8 @@ Statistics FilterBenchmark(
        cerr << "ERROR: This is a potential bug!" << endl;
       }
     }
-    result.finds_per_nano[100 * found_probability] =
-        actual_sample_size / static_cast<double>(lookup_time);
+    result.nanos_per_finds[100 * found_probability] =
+        static_cast<double>(lookup_time) / actual_sample_size;
     if (0.0 == found_probability) {
      ////////////////////////////
      // This is obviously technically wrong!!! The assumption is that there is no overlap between the random
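
For context, the bookkeeping after this commit amounts to dividing elapsed nanoseconds by the number of operations performed. Below is a minimal, self-contained timing sketch in the same spirit; it assumes std::chrono::steady_clock as a stand-in for the benchmark's NowNanos() helper (whose definition is not part of this diff) and a placeholder accumulation loop in place of the filter add/lookup calls.

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for the benchmark's NowNanos(): current time in nanoseconds.
static uint64_t NowNanos() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

int main() {
  std::vector<uint64_t> to_add(1000000);
  for (size_t i = 0; i < to_add.size(); ++i) to_add[i] = i;

  volatile uint64_t sink = 0;  // keeps the loop from being optimized away

  const auto start_time = NowNanos();
  for (const auto v : to_add) {
    sink = sink + v;  // placeholder for the filter operation being timed
  }
  const auto time = NowNanos() - start_time;

  // As in the patched benchmark: report cost per operation, not throughput.
  const double nanos_per_add = static_cast<double>(time) / to_add.size();
  std::printf("%.2f ns/add\n", nanos_per_add);
  return 0;
}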
