 package test
 
 import (
-	"context"
 	"fmt"
-	"sort"
 	"testing"
 
 	pstore "github.com/libp2p/go-libp2p/core/peerstore"
 )
 
-var peerstoreBenchmarks = map[string]func(pstore.Peerstore, chan *peerpair) func(*testing.B){
-	"AddAddrs": benchmarkAddAddrs,
-	"SetAddrs": benchmarkSetAddrs,
-	"GetAddrs": benchmarkGetAddrs,
-	// The in-between get allows us to benchmark the read-through cache.
-	"AddGetAndClearAddrs": benchmarkAddGetAndClearAddrs,
-	// Calls PeersWithAddr on a peerstore with 1000 peers.
-	"Get1000PeersWithAddrs": benchmarkGet1000PeersWithAddrs,
-}
-
 func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, variant string) {
-	// Parameterises benchmarks to tackle peers with 1, 10, 100 multiaddrs.
-	params := []struct {
-		n  int
-		ch chan *peerpair
-	}{
-		{1, make(chan *peerpair, 100)},
-		{10, make(chan *peerpair, 100)},
-		{100, make(chan *peerpair, 100)},
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// Start all test peer producing goroutines, where each produces peers with as many
-	// multiaddrs as the n field in the param struct.
-	for _, p := range params {
-		go AddressProducer(ctx, b, p.ch, p.n)
-	}
-
-	// So tests are always run in the same order.
-	ordernames := make([]string, 0, len(peerstoreBenchmarks))
-	for name := range peerstoreBenchmarks {
-		ordernames = append(ordernames, name)
-	}
-	sort.Strings(ordernames)
-
-	for _, name := range ordernames {
-		bench := peerstoreBenchmarks[name]
-		for _, p := range params {
-			// Create a new peerstore.
-			ps, closeFunc := factory()
-
-			// Run the test.
-			b.Run(fmt.Sprintf("%s-%dAddrs-%s", name, p.n, variant), bench(ps, p.ch))
-
-			// Cleanup.
-			if closeFunc != nil {
-				closeFunc()
+	for _, sz := range []int{1, 10, 100} {
+		const N = 10000
+		peers := getPeerPairs(b, N, sz)
+
+		b.Run(fmt.Sprintf("AddAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				pp := peers[i%N]
+				ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
 			}
-		}
-	}
-}
-
-func benchmarkAddAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
-	return func(b *testing.B) {
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			pp := <-addrs
-			ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
-		}
-	}
-}
-
-func benchmarkSetAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
-	return func(b *testing.B) {
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			pp := <-addrs
-			ps.SetAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
-		}
-	}
-}
-
-func benchmarkGetAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
-	return func(b *testing.B) {
-		pp := <-addrs
-		ps.SetAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
-
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			_ = ps.Addrs(pp.ID)
-		}
-	}
-}
-
-func benchmarkAddGetAndClearAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
-	return func(b *testing.B) {
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			pp := <-addrs
-			ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
-			ps.Addrs(pp.ID)
-			ps.ClearAddrs(pp.ID)
-		}
-	}
-}
+		})
+
+		b.Run(fmt.Sprintf("GetAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			// Seed addresses outside the timer so the loop measures reads, not writes.
+			for _, pp := range peers {
+				ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+			}
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				pp := peers[i%N]
+				_ = ps.Addrs(pp.ID)
+			}
+		})
+
+		b.Run(fmt.Sprintf("GetAndClearAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				pp := peers[i%N]
+				ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+				ps.Addrs(pp.ID)
+				ps.ClearAddrs(pp.ID)
+			}
+		})
 
-func benchmarkGet1000PeersWithAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
-	return func(b *testing.B) {
-		var peers = make([]*peerpair, 1000)
-		for i := range peers {
-			pp := <-addrs
-			ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
-			peers[i] = pp
-		}
+		b.Run(fmt.Sprintf("PeersWithAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			for _, pp := range peers {
+				ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+			}
 
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			_ = ps.PeersWithAddrs()
-		}
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				_ = ps.PeersWithAddrs()
+			}
+		})
+
+		b.Run(fmt.Sprintf("SetAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				pp := peers[i%N]
+				ps.SetAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+			}
+		})
 	}
 }
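
Note: the new benchmarks call getPeerPairs, which is not included in this diff. As a minimal sketch only, and assuming it reuses the existing peerpair type and the AddressProducer helper that the removed code already called (plus a "context" import wherever the helper lives), it might look like this:

// Hypothetical sketch of getPeerPairs; not part of this change.
// It collects n pre-generated peerpair values, each carrying addrsPerPeer
// multiaddrs, so the benchmarks can index a fixed slice instead of reading
// from a producer channel inside the timed loop.
func getPeerPairs(b *testing.B, n, addrsPerPeer int) []*peerpair {
	b.Helper()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// AddressProducer(ctx, b, ch, addrsPerPeer) is the existing generator
	// used by the removed code; it streams peers with addrsPerPeer addrs each.
	ch := make(chan *peerpair, 100)
	go AddressProducer(ctx, b, ch, addrsPerPeer)

	pairs := make([]*peerpair, n)
	for i := range pairs {
		pairs[i] = <-ch
	}
	return pairs
}

Pre-generating the N pairs keeps peer and address creation out of the timed loops; the peers[i%N] indexing in the subtests above relies on that fixed-size slice.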