Commit 85f1f05

WIP

1 parent 2ac0461 commit 85f1f05

1 file changed: +60 -24 lines


hamt_bench_test.go

Lines changed: 60 additions & 24 deletions
@@ -51,12 +51,28 @@ func BenchmarkSerializeNode(b *testing.B) {
 }
 
 type benchSetCase struct {
+	kcount        int
+	bitwidth      int
+	datasize      int
+	flushInterval int
+}
+
+type benchFillCase struct {
+	kcount        int
+	bitwidth      int
+	datasize      int
+	flushInterval int
+}
+
+type benchFindCase struct {
 	kcount   int
 	bitwidth int
-	// flushInterval int
+	datasize int
 }
 
 var benchSetCaseTable []benchSetCase
+var benchFillCaseTable []benchFillCase
+var benchFindCaseTable []benchFindCase
 
 func init() {
 	kCounts := []int{
@@ -77,13 +93,40 @@ func init() {
 		7,
 		8,
 	}
-	// flushIntervals := []int{
-	// 	1,
-	// }
+	flushIntervals := []int{
+		1,
+		1000,
+	}
+	dataSize := []int{
+		1,
+	}
 	// bucketsize-aka-arraywidth? maybe someday.
 	for _, c := range kCounts {
-		for _, bw := range bitwidths {
-			benchSetCaseTable = append(benchSetCaseTable, benchSetCase{kcount: c, bitwidth: bw})
+		for _, d := range dataSize {
+			for _, bw := range bitwidths {
+				benchFindCaseTable = append(benchFindCaseTable,
+					benchFindCase{
+						kcount:   c,
+						bitwidth: bw,
+						datasize: d,
+					})
+				for _, f := range flushIntervals {
+					benchFillCaseTable = append(benchFillCaseTable,
+						benchFillCase{
+							kcount:        c,
+							bitwidth:      bw,
+							datasize:      d,
+							flushInterval: f,
+						})
+					benchSetCaseTable = append(benchSetCaseTable,
+						benchSetCase{
+							kcount:        c,
+							bitwidth:      bw,
+							datasize:      d,
+							flushInterval: f,
+						})
+				}
+			}
 		}
 	}
 }
@@ -114,7 +157,7 @@ func init() {
 // See "BenchmarkSet*" for a probe of how long it takes to set additional entries in an already-large hamt
 // (this gives a more interesting and useful nanoseconds-per-op indicators).
 func BenchmarkFill(b *testing.B) {
-	for _, t := range benchSetCaseTable {
+	for _, t := range benchFillCaseTable {
 		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				r := rander{rand.New(rand.NewSource(int64(i)))}
@@ -148,32 +191,24 @@ func BenchmarkFill(b *testing.B) {
 	}
 }
 
-// BenchmarkSetBulk creates a large HAMT, then resets the timer, and does another 1000 inserts,
+// BenchmarkSet creates a large HAMT, then starts the timer, and does another 1000 inserts,
 // measuring the time taken for this second batch of inserts.
-// Flushing happens once after all 1000 inserts.
-//
-// The number of *additional* blocks per entry is reported.
-// This number is usually less than one, because the bulk flush means changes might be amortized.
-// func BenchmarkSetBulk(b *testing.B) {
-// 	doBenchmarkSetSuite(b, false)
-// }
-
-// BenchmarkSetIndividual is the same as BenchmarkSetBulk, but flushes more.
-// Flush happens per insert.
+// The flushing rate is parameterized.
 //
 // The number of *additional* blocks per entry is reported.
-// Since we flush each insert individually, this number should be at least 1 --
+// With a high flush interval, this number is usually less than one, because changes are amortized across flushes.
+// With a flush interval of one, this number should be at least 1 --
 // however, since we choose random keys, it can still turn out lower if keys happen to collide.
 // (The Set method does not make it possible to adjust our denominator to compensate for this: it does not yield previous values nor indicators of prior presense.)
-func BenchmarkSetIndividual(b *testing.B) {
-	doBenchmarkSetSuite(b, true)
+func BenchmarkSet(b *testing.B) {
+	doBenchmarkSetSuite(b)
 }
 
-func doBenchmarkSetSuite(b *testing.B, flushPer bool) {
+func doBenchmarkSetSuite(b *testing.B) {
 	for j, t := range benchSetCaseTable {
 		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
-			fmt.Printf("Case: %d, b.N=%d\n", j, b.N)
 			for i := 0; i < b.N; i++ {
+				b.StopTimer()
 				r := rander{rand.New(rand.NewSource(int64(i)))}
 				blockstore := newMockBlocks()
 				n := NewNode(cbor.NewCborStore(blockstore), UseTreeBitWidth(t.bitwidth))
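
The doc comment above promises a blocks-per-entry number, and the next hunk captures the blockstore size for it, but the reporting code itself falls outside the hunks shown. A sketch of how such a metric could be derived, assuming the mock blockstore's data map seen in this file and the standard testing.B.ReportMetric (Go 1.13+):

// Sketch only: after the 1000 timed inserts (and a final flush so
// dirty nodes reach the blockstore), report new blocks per entry.
if err := n.Flush(context.Background()); err != nil {
	b.Fatal(err)
}
addedBlocks := len(blockstore.data) - initalBlockstoreSize
// Amortization shows up here: with flushInterval=1000, interior
// nodes are written once per batch, so the ratio can drop below 1;
// with flushInterval=1, each insert writes at least one block.
b.ReportMetric(float64(addedBlocks)/1000, "blocks/entry")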
@@ -187,10 +222,11 @@ func doBenchmarkSetSuite(b *testing.B, flushPer bool) {
 					b.Fatal(err)
 				}
 				initalBlockstoreSize := len(blockstore.data)
-				b.ResetTimer()
+				// b.ResetTimer()
 				blockstore.stats = blockstoreStats{}
 				// Additional inserts:
 				b.ReportAllocs()
+				b.StartTimer()
 				for j := 0; j < 1000; j++ {
 					if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
 						b.Fatal(err)
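
The commit replaces the single b.ResetTimer() with a b.StopTimer()/b.StartTimer() pair, keeping the expensive per-iteration setup (building the large HAMT) out of the measurement so that only the 1000 additional inserts are timed. A self-contained sketch of that stdlib pattern; BenchmarkTimerPattern is illustrative, not part of this repo:

package hamt

import "testing"

// BenchmarkTimerPattern demonstrates the timer control used above:
// StopTimer excludes per-iteration setup from the measurement and
// StartTimer resumes timing just before the code under test.
func BenchmarkTimerPattern(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		input := make([]byte, 1<<16) // expensive setup, not timed
		b.StartTimer()
		var sum byte
		for _, v := range input { // the timed work
			sum += v
		}
		_ = sum
	}
}

One caveat of this pattern: each StopTimer/StartTimer call carries some overhead, so very short timed regions can produce noisy numbers.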
