Skip to content

Commit 4236c68

Browse files
committed
Yet more benchmark content and reporting.
Do flush explicitly. It is necessary in this library's API. Started measuring byte size in another, more direct mechanism. There's a significant difference between the two forms of measurement (the checkSize method gives much flatter results than the look at the blockstore does!); I don't yet understand why this is or what significance it might (or might not) have. BenchmarkSet now has two variants: bulk flush and individual flush. The behavior in terms of how many new blocks are created varies markedly, as you'd expect. Include more bitsizes. I want to gather enough datapoints to draw interesting charts. Surely we'll expect to see some predictable change in the scaling curves across this dimension? Histograms of the sizes of blocks that appear in storage are now available. I've commented them back out after making some observations; they produce quite a lot of noise in output.
1 parent e34c0ee commit 4236c68

File tree

2 files changed

+121
-11
lines changed

2 files changed

+121
-11
lines changed

hamt_bench_test.go

Lines changed: 60 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -62,15 +62,15 @@ func init() {
6262
1,
6363
10,
6464
100,
65-
1000, // aka 1M
66-
10000, // aka 10M -- you'll need a lot of RAM for this. Also, some patience.
65+
1000, // aka 1M
66+
//10000, // aka 10M -- you'll need a lot of RAM for this. Also, some patience.
6767
}
6868
bitwidths := []int{
6969
3,
70-
//4,
70+
4,
7171
5,
72-
//6,
73-
//7,
72+
6,
73+
7,
7474
8,
7575
}
7676
// bucketsize-aka-arraywidth? maybe someday.
@@ -81,13 +81,20 @@ func init() {
8181
}
8282
}
8383

84-
// BenchmarkFill creates a large HAMT, and measures how long it takes to generate all of this many entries.
84+
// Histograms of blocksizes can be logged from some of the following functions, but are commented out.
85+
// The main thing to check for in those is whether there are any exceptionally small blocks being produced:
86+
// less than 64 bytes is a bit concerning because we assume there's some overhead per block in most operations (even if the exact amount may vary situationally).
87+
// We do see some of these small blocks with small bitwidth parameters (e.g. 3), but almost none with larger bitwidth parameters.
88+
89+
// BenchmarkFill creates a large HAMT, and measures how long it takes to generate all of this many entries;
90+
// the number of entries is varied in sub-benchmarks, denoted by their "n=" label component.
91+
// Flush is done once for the entire structure, meaning the number of blocks generated per entry can be much fewer than 1.
8592
//
8693
// The number of blocks saved to the blockstore per entry is reported, and the total content size in bytes.
8794
// The nanoseconds-per-op report on this function is not very useful, because the size of "op" varies with "n" between benchmarks.
8895
//
89-
// See "BenchmarkSet" for a probe of how long it takes to set additional entries in an already-large hamt
90-
// (this gives a more interesting and useful nanoseconds-per-op).
96+
// See "BenchmarkSet*" for a probe of how long it takes to set additional entries in an already-large hamt
97+
// (this gives more interesting and useful nanoseconds-per-op indicators).
9198
func BenchmarkFill(b *testing.B) {
9299
for _, t := range benchSetCaseTable {
93100
b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
@@ -101,21 +108,45 @@ func BenchmarkFill(b *testing.B) {
101108
b.Fatal(err)
102109
}
103110
}
111+
if err := n.Flush(context.Background()); err != nil {
112+
b.Fatal(err)
113+
}
104114
b.StopTimer()
105115
b.ReportMetric(float64(len(blockstore.data))/float64(t.kcount*1000), "blocks/entry")
106116
binarySize, _ := n.checkSize(context.Background())
107-
b.ReportMetric(float64(binarySize)/float64(t.kcount*1000), "bytes/entry")
117+
b.ReportMetric(float64(binarySize)/float64(t.kcount*1000), "bytes(hamtAccnt)/entry")
118+
b.ReportMetric(float64(blockstore.totalBlockSizes())/float64(t.kcount*1000), "bytes(blockstoreAccnt)/entry")
119+
if i < 3 {
120+
//b.Logf("block size histogram: %v\n", blockstore.getBlockSizesHistogram())
121+
}
108122
b.StartTimer()
109123
}
110124
})
111125
}
112126
}
113127

114-
// BenchmarkSet creates a large HAMT, then resets the timer, and does another 1000 inserts,
128+
// BenchmarkSetBulk creates a large HAMT, then resets the timer, and does another 1000 inserts,
115129
// measuring the time taken for this second batch of inserts.
130+
// Flushing happens once after all 1000 inserts.
116131
//
117132
// The number of *additional* blocks per entry is reported.
118-
func BenchmarkSet(b *testing.B) {
133+
// This number is usually less than one, because the bulk flush means changes might be amortized.
134+
func BenchmarkSetBulk(b *testing.B) {
135+
doBenchmarkSetSuite(b, false)
136+
}
137+
138+
// BenchmarkSetIndividual is the same as BenchmarkSetBulk, but flushes more.
139+
// Flush happens per insert.
140+
//
141+
// The number of *additional* blocks per entry is reported.
142+
// Since we flush each insert individually, this number should be at least 1 --
143+
// however, since we choose random keys, it can still turn out lower if keys happen to collide.
144+
// (The Set method does not make it possible to adjust our denominator to compensate for this: it does not yield previous values nor indicators of prior presence.)
145+
func BenchmarkSetIndividual(b *testing.B) {
146+
doBenchmarkSetSuite(b, true)
147+
}
148+
149+
func doBenchmarkSetSuite(b *testing.B, flushPer bool) {
119150
for _, t := range benchSetCaseTable {
120151
b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
121152
for i := 0; i < b.N; i++ {
@@ -128,6 +159,9 @@ func BenchmarkSet(b *testing.B) {
128159
b.Fatal(err)
129160
}
130161
}
162+
if err := n.Flush(context.Background()); err != nil {
163+
b.Fatal(err)
164+
}
131165
initalBlockstoreSize := len(blockstore.data)
132166
b.ResetTimer()
133167
// Additional inserts:
@@ -136,8 +170,23 @@ func BenchmarkSet(b *testing.B) {
136170
if err := n.Set(context.Background(), r.randString(), r.randValue()); err != nil {
137171
b.Fatal(err)
138172
}
173+
if flushPer {
174+
if err := n.Flush(context.Background()); err != nil {
175+
b.Fatal(err)
176+
}
177+
}
178+
}
179+
if !flushPer {
180+
if err := n.Flush(context.Background()); err != nil {
181+
b.Fatal(err)
182+
}
139183
}
184+
b.StopTimer()
140185
b.ReportMetric(float64(len(blockstore.data)-initalBlockstoreSize)/float64(1000), "addntlBlocks/addntlEntry")
186+
if i < 3 {
187+
// b.Logf("block size histogram: %v\n", blockstore.getBlockSizesHistogram())
188+
}
189+
b.StartTimer()
141190
}
142191
})
143192
}

hamt_test.go

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"encoding/hex"
88
"fmt"
99
"math/rand"
10+
"strconv"
1011
"strings"
1112
"testing"
1213
"time"
@@ -38,6 +39,66 @@ func (mb *mockBlocks) Put(b block.Block) error {
3839
return nil
3940
}
4041

42+
func (mb *mockBlocks) totalBlockSizes() int {
43+
sum := 0
44+
for _, v := range mb.data {
45+
sum += len(v.RawData())
46+
}
47+
return sum
48+
}
49+
50+
// blockSizesHistogram counts blocks bucketed by size in powers of two:
// index 0 holds blocks of <= 8 bytes, each following index doubles the
// bound up to index 10 (<= 8192 bytes), and index 11 holds anything larger.
type blockSizesHistogram [12]int
51+
52+
func (mb *mockBlocks) getBlockSizesHistogram() (h blockSizesHistogram) {
53+
for _, v := range mb.data {
54+
l := len(v.RawData())
55+
switch {
56+
case l <= 2<<2: // 8
57+
h[0]++
58+
case l <= 2<<3: // 16
59+
h[1]++
60+
case l <= 2<<4: // 32
61+
h[2]++
62+
case l <= 2<<5: // 64
63+
h[3]++
64+
case l <= 2<<6: // 128
65+
h[4]++
66+
case l <= 2<<7: // 256
67+
h[5]++
68+
case l <= 2<<8: // 512
69+
h[6]++
70+
case l <= 2<<9: // 1024
71+
h[7]++
72+
case l <= 2<<10: // 2048
73+
h[8]++
74+
case l <= 2<<11: // 4096
75+
h[9]++
76+
case l <= 2<<12: // 8192
77+
h[10]++
78+
default:
79+
h[11]++
80+
}
81+
}
82+
return
83+
}
84+
85+
func (h blockSizesHistogram) String() string {
86+
v := "["
87+
v += "<=" + strconv.Itoa(2<<2) + ":" + strconv.Itoa(h[0]) + ", "
88+
v += "<=" + strconv.Itoa(2<<3) + ":" + strconv.Itoa(h[1]) + ", "
89+
v += "<=" + strconv.Itoa(2<<4) + ":" + strconv.Itoa(h[2]) + ", "
90+
v += "<=" + strconv.Itoa(2<<5) + ":" + strconv.Itoa(h[3]) + ", "
91+
v += "<=" + strconv.Itoa(2<<6) + ":" + strconv.Itoa(h[4]) + ", "
92+
v += "<=" + strconv.Itoa(2<<7) + ":" + strconv.Itoa(h[5]) + ", "
93+
v += "<=" + strconv.Itoa(2<<8) + ":" + strconv.Itoa(h[6]) + ", "
94+
v += "<=" + strconv.Itoa(2<<9) + ":" + strconv.Itoa(h[7]) + ", "
95+
v += "<=" + strconv.Itoa(2<<10) + ":" + strconv.Itoa(h[8]) + ", "
96+
v += "<=" + strconv.Itoa(2<<11) + ":" + strconv.Itoa(h[9]) + ", "
97+
v += "<=" + strconv.Itoa(2<<12) + ":" + strconv.Itoa(h[10]) + ", "
98+
v += ">" + strconv.Itoa(2<<12) + ":" + strconv.Itoa(h[11])
99+
return v + "]"
100+
}
101+
41102
func randString() string {
42103
buf := make([]byte, 18)
43104
rand.Read(buf)

0 commit comments

Comments
 (0)