@@ -53,6 +53,7 @@ func BenchmarkSerializeNode(b *testing.B) {
 type benchSetCase struct {
 	kcount   int
 	bitwidth int
+	// flushInterval int
 }
 
 var benchSetCaseTable []benchSetCase
@@ -76,6 +77,9 @@ func init() {
 		7,
 		8,
 	}
+	// flushIntervals := []int{
+	// 	1,
+	// }
 	// bucketsize-aka-arraywidth? maybe someday.
 	for _, c := range kCounts {
 		for _, bw := range bitwidths {
@@ -150,9 +154,9 @@ func BenchmarkFill(b *testing.B) {
 //
 // The number of *additional* blocks per entry is reported.
 // This number is usually less than one, because the bulk flush means changes might be amortized.
-func BenchmarkSetBulk(b *testing.B) {
-	doBenchmarkSetSuite(b, false)
-}
+// func BenchmarkSetBulk(b *testing.B) {
+// 	doBenchmarkSetSuite(b, false)
+// }
 
 // BenchmarkSetIndividual is the same as BenchmarkSetBulk, but flushes more.
 // Flush happens per insert.
@@ -166,8 +170,9 @@ func BenchmarkSetIndividual(b *testing.B) {
 }
 
 func doBenchmarkSetSuite(b *testing.B, flushPer bool) {
-	for _, t := range benchSetCaseTable {
+	for j, t := range benchSetCaseTable {
 		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
+			fmt.Printf("Case: %d, b.N=%d\n", j, b.N)
 			for i := 0; i < b.N; i++ {
 				r := rander{rand.New(rand.NewSource(int64(i)))}
 				blockstore := newMockBlocks()
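For context on the fragments above: `benchSetCaseTable` is built in `init()` as the cross product of key counts and bitwidths. Below is a minimal, self-contained sketch of that setup; the package name and the `kCounts` values are assumptions, since only the trailing bitwidths `7` and `8` and the nested loops are visible in this diff.

```go
package hamt_test // package name assumed for illustration

// benchSetCase and benchSetCaseTable mirror the declarations in the diff above.
type benchSetCase struct {
	kcount   int // number of keys to insert, in thousands
	bitwidth int // node bitwidth for the structure under test
}

var benchSetCaseTable []benchSetCase

func init() {
	kCounts := []int{1, 10, 100}          // assumed values; not shown in the diff
	bitwidths := []int{3, 4, 5, 6, 7, 8}  // 7 and 8 appear in the diff; the rest are assumed
	// Build the case table as the cross product of the two dimensions,
	// matching the nested loops visible in the second hunk above.
	for _, c := range kCounts {
		for _, bw := range bitwidths {
			benchSetCaseTable = append(benchSetCaseTable, benchSetCase{kcount: c, bitwidth: bw})
		}
	}
}
```

One caveat about the added `fmt.Printf`: the function passed to `b.Run` is invoked repeatedly with increasing `b.N` until timings stabilize, so each case will print more than once per benchmark run.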