@@ -51,47 +51,103 @@ func BenchmarkSerializeNode(b *testing.B) {
 }
 
 type benchSetCase struct {
-	count    int
+	kcount   int
 	bitwidth int
 }
 
-func BenchmarkSet(b *testing.B) {
-	kCounts := []int{1, 10, 100}
-	bitwidths := []int{5, 8}
+var benchSetCaseTable []benchSetCase
 
-	var table []benchSetCase
+func init() {
+	kCounts := []int{
+		1,
+		10,
+		100,
+		1000,  // aka 1M
+		10000, // aka 10M -- you'll need a lot of RAM for this. Also, some patience.
+	}
+	bitwidths := []int{
+		3,
+		//4,
+		5,
+		//6,
+		//7,
+		8,
+	}
+	// bucketsize-aka-arraywidth? maybe someday.
 
 	for _, c := range kCounts {
-
 		for _, bw := range bitwidths {
-			table = append(table, benchSetCase{count: c * 1000, bitwidth: bw})
+			benchSetCaseTable = append(benchSetCaseTable, benchSetCase{kcount: c, bitwidth: bw})
 		}
+	}
+}
 
+// BenchmarkFill creates a large HAMT and measures how long it takes to generate all of its entries.
+//
+// The number of blocks saved to the blockstore per entry is reported, as is the total content size in bytes.
+// The nanoseconds-per-op figure for this function is not very useful, because the size of an "op" varies with "n" between benchmarks.
+//
+// See BenchmarkSet for a probe of how long it takes to set additional entries in an already-large HAMT
+// (which gives a more interesting and useful nanoseconds-per-op).
+func BenchmarkFill(b *testing.B) {
+	for _, t := range benchSetCaseTable {
+		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				r := rander{rand.New(rand.NewSource(int64(i)))}
+				blockstore := newMockBlocks()
+				n := NewNode(cbor.NewCborStore(blockstore), UseTreeBitWidth(t.bitwidth))
+				//b.ResetTimer()
+				for j := 0; j < t.kcount*1000; j++ {
+					if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
+						b.Fatal(err)
+					}
+				}
+				b.StopTimer()
+				b.ReportMetric(float64(len(blockstore.data))/float64(t.kcount*1000), "blocks/entry")
+				binarySize, _ := n.checkSize(context.Background())
+				b.ReportMetric(float64(binarySize)/float64(t.kcount*1000), "bytes/entry")
+				b.StartTimer()
+			}
+		})
 	}
-	r := rander{rand.New(rand.NewSource(int64(42)))}
-	for _, t := range table {
-		b.Run(fmt.Sprintf("%d/%d", t.count, t.bitwidth), func(b *testing.B) {
-			ctx := context.Background()
-			n := NewNode(cbor.NewCborStore(newMockBlocks()), UseTreeBitWidth(t.bitwidth))
-			b.ResetTimer()
+}
+
+// BenchmarkSet creates a large HAMT, then resets the timer and does another 1000 inserts,
+// measuring the time taken for this second batch of inserts.
+//
+// The number of *additional* blocks per entry is reported.
+func BenchmarkSet(b *testing.B) {
+	for _, t := range benchSetCaseTable {
+		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
-				for j := 0; j < t.count; j++ {
-					if err := n.Set(ctx, r.randString(), r.randValue()); err != nil {
+				r := rander{rand.New(rand.NewSource(int64(i)))}
+				blockstore := newMockBlocks()
+				n := NewNode(cbor.NewCborStore(blockstore), UseTreeBitWidth(t.bitwidth))
+				// Initial fill:
+				for j := 0; j < t.kcount*1000; j++ {
+					if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
 						b.Fatal(err)
 					}
 				}
+				initialBlockstoreSize := len(blockstore.data)
+				b.ResetTimer()
+				// Additional inserts:
+				b.ReportAllocs()
+				for j := 0; j < 1000; j++ {
+					if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
+						b.Fatal(err)
+					}
+				}
+				b.ReportMetric(float64(len(blockstore.data)-initialBlockstoreSize)/float64(1000), "addntlBlocks/addntlEntry")
 			}
 		})
 	}
 }
 
 func BenchmarkFind(b *testing.B) {
-	b.Run("find-10k", doBenchmarkEntriesCount(10000, 8))
-	b.Run("find-100k", doBenchmarkEntriesCount(100000, 8))
-	b.Run("find-1m", doBenchmarkEntriesCount(1000000, 8))
-	b.Run("find-10k-bitwidth-5", doBenchmarkEntriesCount(10000, 5))
-	b.Run("find-100k-bitwidth-5", doBenchmarkEntriesCount(100000, 5))
-	b.Run("find-1m-bitwidth-5", doBenchmarkEntriesCount(1000000, 5))
-
+	for _, t := range benchSetCaseTable {
+		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth),
+			doBenchmarkEntriesCount(t.kcount*1000, t.bitwidth))
+	}
 }
 
 func doBenchmarkEntriesCount(num int, bitWidth int) func(b *testing.B) {
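Both new benchmarks lean on `testing.B`'s timer controls plus `ReportMetric`, which landed in Go 1.13: `ResetTimer` discards setup cost, `StopTimer`/`StartTimer` fence off bookkeeping, and `ReportMetric` attaches custom units such as "blocks/entry" to the result line. Here is a minimal, self-contained sketch of that pattern; the package name and the map-based "store" are illustrative stand-ins, not part of this repo.

```go
// bench_pattern_sketch_test.go: the package and map "store" below are
// hypothetical stand-ins used only to illustrate the timer pattern.
package sketch

import (
	"fmt"
	"testing"
)

// BenchmarkMarginalInserts mirrors the shape of BenchmarkSet: pay for an
// expensive setup phase, reset the timer so setup is excluded from ns/op,
// measure only the marginal work, and attach custom per-op metrics.
func BenchmarkMarginalInserts(b *testing.B) {
	store := make(map[string]struct{}) // stand-in for the mock blockstore
	for i := 0; i < 100000; i++ {      // "initial fill", analogous to t.kcount*1000 inserts
		store[fmt.Sprintf("setup-%d", i)] = struct{}{}
	}
	before := len(store)

	b.ResetTimer()   // discard everything timed so far
	b.ReportAllocs() // include allocs/op in the output, as BenchmarkSet does
	for i := 0; i < b.N; i++ {
		store[fmt.Sprintf("extra-%d", i)] = struct{}{}
	}
	b.StopTimer() // fence off the bookkeeping below, as BenchmarkFill does

	// ReportMetric (Go 1.13+) adds a custom unit to the result line,
	// like "blocks/entry" and "addntlBlocks/addntlEntry" above.
	b.ReportMetric(float64(len(store)-before)/float64(b.N), "newKeys/op")
}
```

Because the sub-benchmark names encode their parameters, a single case can be run in isolation, e.g. `go test -run='^$' -bench='BenchmarkSet/n=100k/bitwidth=8'`.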
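The benchmarks construct a fresh `rander` per `b.N` iteration with `rand.New(rand.NewSource(int64(i)))`; the `rander` type and its `randString`/`randValue` helpers are defined elsewhere in this test file and are not part of this hunk. A hypothetical stand-in for the key-generating half, to make the seeding choice concrete (the real helpers may differ in key length and encoding):

```go
// rander_sketch_test.go: a hypothetical stand-in, not the repo's helper.
package sketch

import (
	"encoding/hex"
	"math/rand"
)

type rander struct {
	r *rand.Rand
}

// randString derives a pseudo-random key from the wrapped source. Seeding
// that source with the iteration index, as the benchmarks above do, keeps
// every run reproducible while giving each iteration a distinct key stream.
func (r rander) randString() string {
	buf := make([]byte, 18)
	r.r.Read(buf) // math/rand's Read always fills the slice and never errors
	return hex.EncodeToString(buf)
}
```

Compared with the old single source seeded with 42 and shared across all cases, per-iteration seeding makes each sub-benchmark's key set reproducible on its own, independent of how many cases ran before it.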