
Commit b7949cc

test: add envoy bench (#120)
1 parent ca3d5c8 commit b7949cc

File tree

compose.latency.yaml
test/bench_command.rb
test/benchmark_mixin.rb
test/proxy/envoy.yaml

4 files changed: 185 additions, 80 deletions


compose.latency.yaml

Lines changed: 9 additions & 0 deletions
@@ -101,3 +101,12 @@ services:
        condition: service_healthy
      node9:
        condition: service_healthy
+  envoy:
+    image: envoyproxy/envoy:v1.23.1
+    ports:
+      - "7000:10001"
+    volumes:
+      - ./test/proxy/envoy.yaml:/etc/envoy/envoy.yaml
+    depends_on:
+      clustering:
+        condition: service_completed_successfully
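
The published port maps the host's 7000 to Envoy's Redis listener (port 10001 in test/proxy/envoy.yaml below), which is what the new Envoy benchmark connects to. As a minimal sketch, assuming the compose stack is up and the proxy is reachable on localhost:7000 (an assumption, not part of this commit), talking to the proxy with the redis-client gem looks like:

# Sketch only: assumes the stack from compose.latency.yaml is running and the
# proxy is reachable on localhost:7000; protocol: 2 mirrors the benchmark's new_test_client.
require 'redis-client'

proxy = RedisClient.config(host: '127.0.0.1', port: 7000, protocol: 2).new_client
proxy.call('SET', 'key1', 'value1') # Envoy routes the command to the owning cluster node
puts proxy.call('GET', 'key1')      # => "value1"
proxy.close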

test/bench_command.rb

Lines changed: 48 additions & 80 deletions
@@ -1,86 +1,11 @@
 # frozen_string_literal: true

 require 'benchmark_helper'
+require 'benchmark_mixin'

 class BenchCommand
-  module Mixin
-    def setup
-      @client = new_test_client
-      @client.call('FLUSHDB')
-      wait_for_replication
-    end
-
-    def teardown
-      @client.call('FLUSHDB')
-      wait_for_replication
-      @client&.close
-    end
-
-    def bench_echo
-      assert_performance_linear do |n|
-        n.times do
-          @client.call('ECHO', 'Hello world')
-        end
-      end
-    end
-
-    def bench_set
-      assert_performance_linear do |n|
-        n.times do |i|
-          @client.call('SET', "key#{i}", i)
-        end
-      end
-    end
-
-    def bench_get
-      assert_performance_linear do |n|
-        n.times do |i|
-          @client.call('GET', "key#{i}")
-        end
-      end
-    end
-
-    def bench_pipeline_echo
-      assert_performance_linear do |n|
-        @client.pipelined do |pi|
-          n.times do
-            pi.call('ECHO', 'Hello world')
-          end
-        end
-      end
-    end
-
-    def bench_pipeline_set
-      assert_performance_linear do |n|
-        @client.pipelined do |pi|
-          n.times do |i|
-            pi.call('SET', "key#{i}", i)
-          end
-        end
-      end
-    end
-
-    def bench_pipeline_get
-      assert_performance_linear do |n|
-        @client.pipelined do |pi|
-          n.times do |i|
-            pi.call('GET', "key#{i}")
-          end
-        end
-      end
-    end
-
-    private
-
-    def wait_for_replication
-      client_side_timeout = TEST_TIMEOUT_SEC + 1.0
-      server_side_timeout = (TEST_TIMEOUT_SEC * 1000).to_i
-      @client.blocking_call(client_side_timeout, 'WAIT', TEST_REPLICA_SIZE, server_side_timeout)
-    end
-  end
-
  class PrimaryOnly < BenchmarkWrapper
-    include Mixin
+    include BenchmarkMixin

    private

@@ -95,7 +20,7 @@ def new_test_client
  end

  class ScaleReadRandom < BenchmarkWrapper
-    include Mixin
+    include BenchmarkMixin

    private

@@ -112,7 +37,7 @@ def new_test_client
  end

  class ScaleReadLatency < BenchmarkWrapper
-    include Mixin
+    include BenchmarkMixin

    private

@@ -129,7 +54,7 @@ def new_test_client
  end

  class Pooled < BenchmarkWrapper
-    include Mixin
+    include BenchmarkMixin

    private

@@ -142,4 +67,47 @@ def new_test_client
      ::RedisClient::Cluster.new(config, pool: { timeout: TEST_TIMEOUT_SEC, size: 2 })
    end
  end
+
+  class Envoy < BenchmarkWrapper
+    include BenchmarkMixin
+
+    def setup
+      @client = new_test_client
+      @cluster_client = new_cluster_client
+      @cluster_client.call('FLUSHDB')
+      wait_for_replication
+    end
+
+    def teardown
+      @cluster_client.call('FLUSHDB')
+      wait_for_replication
+      @cluster_client&.close
+      @client&.close
+    end
+
+    # https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/other_protocols/redis#supported-commands
+    def bench_echo
+      skip('Envoy does not support ECHO command.')
+    end
+
+    def bench_pipeline_echo
+      skip('Envoy does not support ECHO command.')
+    end
+
+    private
+
+    def new_test_client
+      ::RedisClient.config(**TEST_GENERIC_OPTIONS.merge(port: 7000, protocol: 2)).new_client
+    end
+
+    def new_cluster_client
+      ::RedisClient.cluster(nodes: TEST_NODE_URIS, fixed_hostname: TEST_FIXED_HOSTNAME, **TEST_GENERIC_OPTIONS).new_client
+    end
+
+    def wait_for_replication
+      client_side_timeout = TEST_TIMEOUT_SEC + 1.0
+      server_side_timeout = (TEST_TIMEOUT_SEC * 1000).to_i
+      @cluster_client.blocking_call(client_side_timeout, 'WAIT', TEST_REPLICA_SIZE, server_side_timeout)
+    end
+  end
 end
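
The new Envoy wrapper keeps a separate cluster client for the FLUSHDB/WAIT housekeeping, sending those straight to the cluster rather than through the proxy, and skips the ECHO benches per the supported-commands list linked above. The bench_* methods themselves come from minitest/benchmark; a standalone, illustrative sketch of that pattern (class name, host and port are placeholders, not part of this repository):

# Illustrative only: shows the minitest/benchmark shape used by BenchmarkMixin.
require 'minitest/autorun'
require 'minitest/benchmark'
require 'redis-client'

class EchoBench < Minitest::Benchmark
  def setup
    @client = RedisClient.config(host: '127.0.0.1', port: 6379).new_client
  end

  def teardown
    @client&.close
  end

  def bench_echo
    # Fails unless runtime grows roughly linearly with n across self.bench_range.
    assert_performance_linear do |n|
      n.times { @client.call('ECHO', 'Hello world') }
    end
  end
end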

test/benchmark_mixin.rb

Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+module BenchmarkMixin
+  def setup
+    @client = new_test_client
+    @client.call('FLUSHDB')
+    wait_for_replication
+  end
+
+  def teardown
+    @client.call('FLUSHDB')
+    wait_for_replication
+    @client&.close
+  end
+
+  def bench_echo
+    assert_performance_linear do |n|
+      n.times do
+        @client.call('ECHO', 'Hello world')
+      end
+    end
+  end
+
+  def bench_set
+    assert_performance_linear do |n|
+      n.times do |i|
+        @client.call('SET', "key#{i}", i)
+      end
+    end
+  end
+
+  def bench_get
+    assert_performance_linear do |n|
+      n.times do |i|
+        @client.call('GET', "key#{i}")
+      end
+    end
+  end
+
+  def bench_pipeline_echo
+    assert_performance_linear do |n|
+      @client.pipelined do |pi|
+        n.times do
+          pi.call('ECHO', 'Hello world')
+        end
+      end
+    end
+  end
+
+  def bench_pipeline_set
+    assert_performance_linear do |n|
+      @client.pipelined do |pi|
+        n.times do |i|
+          pi.call('SET', "key#{i}", i)
+        end
+      end
+    end
+  end
+
+  def bench_pipeline_get
+    assert_performance_linear do |n|
+      @client.pipelined do |pi|
+        n.times do |i|
+          pi.call('GET', "key#{i}")
+        end
+      end
+    end
+  end
+
+  private
+
+  def wait_for_replication
+    client_side_timeout = TEST_TIMEOUT_SEC + 1.0
+    server_side_timeout = (TEST_TIMEOUT_SEC * 1000).to_i
+    @client.blocking_call(client_side_timeout, 'WAIT', TEST_REPLICA_SIZE, server_side_timeout)
+  end
+end
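
The mixin assumes the including class supplies new_test_client and that the TEST_* constants come from the benchmark helper. A hypothetical wrapper (the class name here is illustrative; the real ones are in test/bench_command.rb above) only needs:

# Hypothetical example: the including class provides new_test_client;
# BenchmarkWrapper and the TEST_* constants come from the repo's benchmark helper.
class ClusterWide < BenchmarkWrapper
  include BenchmarkMixin

  private

  def new_test_client
    ::RedisClient.cluster(nodes: TEST_NODE_URIS, fixed_hostname: TEST_FIXED_HOSTNAME, **TEST_GENERIC_OPTIONS).new_client
  end
end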

test/proxy/envoy.yaml

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+---
+# https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/examples
+# https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/other_protocols/redis
+# https://github.com/envoyproxy/envoy/blob/main/examples/redis/envoy.yaml
+admin:
+  address:
+    socket_address:
+      protocol: TCP
+      address: 0.0.0.0
+      port_value: 10000
+static_resources:
+  listeners:
+    - name: redis
+      address:
+        socket_address:
+          address: 0.0.0.0
+          port_value: 10001
+      filter_chains:
+        - filters:
+            # https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/redis_proxy/v3/redis_proxy.proto
+            - name: envoy.filters.network.redis_proxy
+              typed_config:
+                "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy
+                stat_prefix: egress_redis
+                settings:
+                  op_timeout: 5s
+                  enable_hashtagging: true
+                  enable_redirection: true
+                  read_policy: PREFER_REPLICA
+                prefix_routes:
+                  catch_all_route:
+                    cluster: redis
+  clusters:
+    # https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/clusters/redis/v3/redis_cluster.proto
+    - name: redis
+      connect_timeout: 5s
+      dns_lookup_family: V4_ONLY
+      cluster_type:
+        name: envoy.clusters.redis
+        typed_config:
+          "@type": type.googleapis.com/google.protobuf.Struct
+          value:
+            cluster_refresh_rate: 5s
+            cluster_refresh_timeout: 4.5s
+            redirect_refresh_interval: 5s
+            redirect_refresh_threshold: 3
+      load_assignment:
+        cluster_name: redis
+        endpoints:
+          - lb_endpoints:
+              - endpoint: { address: { socket_address: { address: node1, port_value: 6379 } } }
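
With stat_prefix: egress_redis, per-command counters are exposed by the admin listener on port 10000, which compose.latency.yaml does not publish, so the sketch below assumes it runs inside the compose network or that an extra port mapping exists. Checking the counters from Ruby might look like:

# Sketch only: assumes Envoy's admin endpoint is reachable at envoy:10000 from
# inside the compose network; stat names follow redis.<stat_prefix>.command.<name>.total.
require 'net/http'

stats = Net::HTTP.get(URI('http://envoy:10000/stats?filter=redis.egress_redis'))
puts stats.lines.grep(/command\.(get|set)\.total/)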

0 commit comments
