Skip to content

Commit e34c0ee

Browse files
committed
Move to standardized benchmark names+tables, in preparation for future charting; introduce Set vs Fill benchmarks; report bytes/entry and blocks/entry.
1 parent 58f187c commit e34c0ee

File tree

1 file changed

+78
-22
lines changed

1 file changed

+78
-22
lines changed

hamt_bench_test.go

Lines changed: 78 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -51,47 +51,103 @@ func BenchmarkSerializeNode(b *testing.B) {
5151
}
5252

5353
// benchSetCase is one row of the shared benchmark table: how many
// entries to create (in thousands) and which HAMT bitwidth to use.
type benchSetCase struct {
	kcount   int // entry count, in thousands (kcount=1000 means 1M entries)
	bitwidth int // HAMT bitwidth, passed to UseTreeBitWidth
}
5757

58-
func BenchmarkSet(b *testing.B) {
59-
kCounts := []int{1, 10, 100}
60-
bitwidths := []int{5, 8}
58+
var benchSetCaseTable []benchSetCase
6159

62-
var table []benchSetCase
60+
func init() {
61+
kCounts := []int{
62+
1,
63+
10,
64+
100,
65+
1000, // aka 1M
66+
10000, // aka 10M -- you'll need a lot of RAM for this. Also, some patience.
67+
}
68+
bitwidths := []int{
69+
3,
70+
//4,
71+
5,
72+
//6,
73+
//7,
74+
8,
75+
}
76+
// bucketsize-aka-arraywidth? maybe someday.
6377
for _, c := range kCounts {
64-
6578
for _, bw := range bitwidths {
66-
table = append(table, benchSetCase{count: c * 1000, bitwidth: bw})
79+
benchSetCaseTable = append(benchSetCaseTable, benchSetCase{kcount: c, bitwidth: bw})
6780
}
81+
}
82+
}
6883

84+
// BenchmarkFill creates a large HAMT, and measures how long it takes to generate all of this many entries.
85+
//
86+
// The number of blocks saved to the blockstore per entry is reported, and the total content size in bytes.
87+
// The nanoseconds-per-op report on this function is not very useful, because the size of "op" varies with "n" between benchmarks.
88+
//
89+
// See "BenchmarkSet" for a probe of how long it takes to set additional entries in an already-large hamt
90+
// (this gives a more interesting and useful nanoseconds-per-op).
91+
func BenchmarkFill(b *testing.B) {
92+
for _, t := range benchSetCaseTable {
93+
b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
94+
for i := 0; i < b.N; i++ {
95+
r := rander{rand.New(rand.NewSource(int64(i)))}
96+
blockstore := newMockBlocks()
97+
n := NewNode(cbor.NewCborStore(blockstore), UseTreeBitWidth(t.bitwidth))
98+
//b.ResetTimer()
99+
for j := 0; j < t.kcount*1000; j++ {
100+
if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
101+
b.Fatal(err)
102+
}
103+
}
104+
b.StopTimer()
105+
b.ReportMetric(float64(len(blockstore.data))/float64(t.kcount*1000), "blocks/entry")
106+
binarySize, _ := n.checkSize(context.Background())
107+
b.ReportMetric(float64(binarySize)/float64(t.kcount*1000), "bytes/entry")
108+
b.StartTimer()
109+
}
110+
})
69111
}
70-
r := rander{rand.New(rand.NewSource(int64(42)))}
71-
for _, t := range table {
72-
b.Run(fmt.Sprintf("%d/%d", t.count, t.bitwidth), func(b *testing.B) {
73-
ctx := context.Background()
74-
n := NewNode(cbor.NewCborStore(newMockBlocks()), UseTreeBitWidth(t.bitwidth))
75-
b.ResetTimer()
112+
}
113+
114+
// BenchmarkSet creates a large HAMT, then resets the timer, and does another 1000 inserts,
115+
// measuring the time taken for this second batch of inserts.
116+
//
117+
// The number of *additional* blocks per entry is reported.
118+
func BenchmarkSet(b *testing.B) {
119+
for _, t := range benchSetCaseTable {
120+
b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
76121
for i := 0; i < b.N; i++ {
77-
for j := 0; j < t.count; j++ {
78-
if err := n.Set(ctx, r.randString(), r.randValue()); err != nil {
122+
r := rander{rand.New(rand.NewSource(int64(i)))}
123+
blockstore := newMockBlocks()
124+
n := NewNode(cbor.NewCborStore(blockstore), UseTreeBitWidth(t.bitwidth))
125+
// Initial fill:
126+
for j := 0; j < t.kcount*1000; j++ {
127+
if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
79128
b.Fatal(err)
80129
}
81130
}
131+
initalBlockstoreSize := len(blockstore.data)
132+
b.ResetTimer()
133+
// Additional inserts:
134+
b.ReportAllocs()
135+
for j := 0; j < 1000; j++ {
136+
if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
137+
b.Fatal(err)
138+
}
139+
}
140+
b.ReportMetric(float64(len(blockstore.data)-initalBlockstoreSize)/float64(1000), "addntlBlocks/addntlEntry")
82141
}
83142
})
84143
}
85144
}
86145

87146
func BenchmarkFind(b *testing.B) {
88-
b.Run("find-10k", doBenchmarkEntriesCount(10000, 8))
89-
b.Run("find-100k", doBenchmarkEntriesCount(100000, 8))
90-
b.Run("find-1m", doBenchmarkEntriesCount(1000000, 8))
91-
b.Run("find-10k-bitwidth-5", doBenchmarkEntriesCount(10000, 5))
92-
b.Run("find-100k-bitwidth-5", doBenchmarkEntriesCount(100000, 5))
93-
b.Run("find-1m-bitwidth-5", doBenchmarkEntriesCount(1000000, 5))
94-
147+
for _, t := range benchSetCaseTable {
148+
b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth),
149+
doBenchmarkEntriesCount(t.kcount*1000, t.bitwidth))
150+
}
95151
}
96152

97153
func doBenchmarkEntriesCount(num int, bitWidth int) func(b *testing.B) {

0 commit comments

Comments
 (0)