Skip to content

Commit 130f839

Browse files
committed
chore: bring back load testing with nixops
This is just for manual bench tests for now, to ensure a change doesn't cause a performance regression. Adds the following nixops commands to reproduce some bench tests. Note: All of the nixops commands need to be run from the nix directory (`cd nix`). - max requests for the nginx server: nixops ssh -d pg_net client vegeta-bench-max-requests It's capable of handling ~12k req/s. - reference bench test with the vegeta http client, only using one thread/worker: nixops ssh -d pg_net client vegeta-bench Vegeta is able to do ~2.5K req/s. - With a pg_net.batch_size=32000, this uses pg_net for the bench test: nixops ssh -d pg_net client net-bench pg_net reaches 400 req/s max before request errors are reported, like `Couldn't resolve host name` or `Couldn't connect to server`. These need further investigation; for now, keeping the batch_size low is necessary for stability.
1 parent 8f821fb commit 130f839

File tree

4 files changed

+212
-5
lines changed

4 files changed

+212
-5
lines changed

docs/contributing.md

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,48 @@ $ nix-shell
7474
$ sudo with-gdb -p 1145879
7575
```
7676

77+
## Load Testing
78+
79+
This will deploy a client and a server on t3a.nano instances. You must have a `default` profile set up in `.aws/credentials`.
80+
81+
```bash
82+
cd nix
83+
84+
nixops create -d pg_net
85+
86+
# will take a while
87+
nixops deploy -k -d pg_net --allow-reboot --confirm
88+
```
89+
90+
Then you can connect to the client instance and make requests to the server instance through `pg_net`.
91+
92+
```bash
93+
cd nix
94+
95+
nixops ssh -d pg_net client
96+
97+
psql -U postgres
98+
99+
create extension pg_net;
100+
101+
select net.http_get('http://server');
102+
# this is the default welcome page of nginx on the server instance
103+
# "server" is already included in /etc/hosts, so `curl http://server` will give the same result
104+
105+
# do some load testing
106+
select net.http_get('http://server') from generate_series(1,1000);
107+
# run `top` on another shell(another `nixops ssh -d pg_net client`) to check the worker behavior
108+
```
109+
110+
To destroy the instances:
111+
112+
```bash
113+
cd nix
114+
115+
nixops destroy -d pg_net --confirm
116+
nixops delete -d pg_net
117+
```
118+
77119
## Documentation
78120
79121
All public API must be documented. Building documentation requires python 3.6+

nix/nginxScript.nix renamed to nix/nginxCustom.nix

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,15 @@ let
77
src = fetchFromGitHub {
88
owner = "steve-chavez";
99
repo = name;
10-
rev = "5eae52b15b0785765c5de17ede774f04cd60729d";
11-
sha256 = "sha256-oDvEZ2OVnM8lePYBUkQa294FLcLnxYMpE40S4XmqdBY=";
10+
rev = "668126a815daaf741433409a5afff5932e2fb2af";
11+
sha256 = "sha256-tl7NoPlQCN9DDYQLRrHA3bP5csqbXUW9ozLKPbH2dfI=";
1212
};
1313
meta = with lib; {
1414
license = with licenses; [ mit ];
1515
};
1616
};
1717
customNginx = nginx.override {
18+
configureFlags = ["--with-cc='c99'"];
1819
modules = [
1920
nginxModules.echo
2021
ngx_pathological
@@ -32,4 +33,7 @@ let
3233
"$@"
3334
'';
3435
in
35-
writeShellScriptBin "net-with-nginx" script
36+
{
37+
customNginx = customNginx;
38+
nginxScript = writeShellScriptBin "net-with-nginx" script;
39+
}

nix/nixops.nix

Lines changed: 160 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,160 @@
1+
let
2+
region = "us-east-2";
3+
accessKeyId = "default";
4+
in {
5+
network.storage.legacy = {
6+
databasefile = ".deployments.nixops";
7+
};
8+
9+
network.description = "pg_net load testing setup";
10+
11+
resources = {
12+
ec2KeyPairs.netKP = { inherit region accessKeyId; };
13+
vpc.netVpc = {
14+
inherit region accessKeyId;
15+
enableDnsSupport = true;
16+
enableDnsHostnames = true;
17+
cidrBlock = "10.0.0.0/24";
18+
};
19+
vpcSubnets.netSubnet = {resources, ...}: {
20+
inherit region accessKeyId;
21+
zone = "${region}a";
22+
vpcId = resources.vpc.netVpc;
23+
cidrBlock = "10.0.0.0/24";
24+
mapPublicIpOnLaunch = true;
25+
};
26+
vpcInternetGateways.netIG = { resources, ... }: {
27+
inherit region accessKeyId;
28+
vpcId = resources.vpc.netVpc;
29+
};
30+
vpcRouteTables.netRT = { resources, ... }: {
31+
inherit region accessKeyId;
32+
vpcId = resources.vpc.netVpc;
33+
};
34+
vpcRoutes.netIGRoute = { resources, ... }: {
35+
inherit region accessKeyId;
36+
routeTableId = resources.vpcRouteTables.netRT;
37+
destinationCidrBlock = "0.0.0.0/0";
38+
gatewayId = resources.vpcInternetGateways.netIG;
39+
};
40+
vpcRouteTableAssociations.netTblAssoc = { resources, ... }: {
41+
inherit region accessKeyId;
42+
subnetId = resources.vpcSubnets.netSubnet;
43+
routeTableId = resources.vpcRouteTables.netRT;
44+
};
45+
ec2SecurityGroups.netSecGroup = {resources, ...}: {
46+
inherit region accessKeyId;
47+
vpcId = resources.vpc.netVpc;
48+
rules = [
49+
{ fromPort = 80; toPort = 80; sourceIp = "0.0.0.0/0"; }
50+
{ fromPort = 22; toPort = 22; sourceIp = "0.0.0.0/0"; }
51+
{ fromPort = 0; toPort = 65535; sourceIp = resources.vpcSubnets.netSubnet.cidrBlock; }
52+
];
53+
};
54+
};
55+
56+
server = { config, pkgs, resources, ... }: {
57+
deployment = {
58+
targetEnv = "ec2";
59+
ec2 = {
60+
inherit region accessKeyId;
61+
instanceType = "t3a.micro";
62+
associatePublicIpAddress = true;
63+
keyPair = resources.ec2KeyPairs.netKP;
64+
subnetId = resources.vpcSubnets.netSubnet;
65+
securityGroupIds = [resources.ec2SecurityGroups.netSecGroup.name];
66+
};
67+
};
68+
69+
services.nginx = {
70+
enable = true;
71+
package = (pkgs.callPackage ./nginxCustom.nix {}).customNginx;
72+
config = ''
73+
worker_processes auto;
74+
events {
75+
worker_connections 1024;
76+
}
77+
http {
78+
server {
79+
listen 0.0.0.0:80 ;
80+
listen [::]:80 ;
81+
server_name localhost;
82+
${builtins.readFile nginx/conf/custom.conf}
83+
}
84+
}
85+
'';
86+
};
87+
networking.firewall.allowedTCPPorts = [ 80 ];
88+
};
89+
90+
client = { config, pkgs, nodes, resources, ... }: {
91+
deployment = {
92+
targetEnv = "ec2";
93+
ec2 = {
94+
inherit region accessKeyId;
95+
instanceType = "t3a.micro";
96+
associatePublicIpAddress = true;
97+
ebsInitialRootDiskSize = 6;
98+
keyPair = resources.ec2KeyPairs.netKP;
99+
subnetId = resources.vpcSubnets.netSubnet;
100+
securityGroupIds = [resources.ec2SecurityGroups.netSecGroup.name];
101+
};
102+
};
103+
104+
services.postgresql = {
105+
enable = true;
106+
package = pkgs.postgresql_15.withPackages (p: [
107+
(pkgs.callPackage ./pg_net.nix { postgresql = pkgs.postgresql_15;})
108+
]);
109+
authentication = pkgs.lib.mkOverride 10 ''
110+
local all all trust
111+
'';
112+
settings = {
113+
shared_preload_libraries = "pg_net";
114+
};
115+
initialScript = pkgs.writeText "init-sql-script" ''
116+
create extension pg_net;
117+
118+
alter system set pg_net.batch_size to 32000;
119+
120+
select net.worker_restart();
121+
122+
create view pg_net_stats as
123+
select
124+
count(*) filter (where error_msg is null) as request_successes,
125+
count(*) filter (where error_msg is not null) as request_failures,
126+
(select error_msg from net._http_response where error_msg is not null order by id desc limit 1) as last_failure_error
127+
from net._http_response;
128+
'';
129+
};
130+
131+
networking.hosts = {
132+
"${nodes.server.config.networking.privateIPv4}" = [ "server" ];
133+
};
134+
135+
environment.systemPackages = [
136+
pkgs.vegeta
137+
(
138+
pkgs.writeShellScriptBin "vegeta-bench" ''
139+
# rate=0 means maximum rate subject to max-workers
140+
echo "GET http://server/pathological?status=200" | vegeta attack -rate=0 -duration=1s -max-workers=1 | tee results.bin | vegeta report
141+
''
142+
)
143+
(
144+
pkgs.writeShellScriptBin "vegeta-bench-max-requests" ''
145+
# rate=0 means maximum rate subject to max-workers
146+
echo "GET http://server/pathological?status=200" | vegeta attack -rate=0 -duration=10s -max-workers=50 | tee results.bin | vegeta report
147+
''
148+
)
149+
(
150+
pkgs.writeShellScriptBin "net-bench" ''
151+
psql -U postgres -c "truncate net._http_response;"
152+
psql -U postgres -c "select net.http_get('http://server/pathological?status=200') from generate_series(1, 400);" > /dev/null
153+
sleep 2
154+
psql -U postgres -c "select * from pg_net_stats;"
155+
''
156+
)
157+
];
158+
};
159+
160+
}

shell.nix

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ mkShell {
1515
];
1616
pgWithExt = { pg }: pg.withPackages (p: [ (callPackage ./nix/pg_net.nix { postgresql = pg;}) ]);
1717
extAll = map (x: callPackage ./nix/pgScript.nix { postgresql = pgWithExt { pg = x;}; }) supportedPgVersions;
18-
nginxScript = callPackage ./nix/nginxScript.nix {};
18+
nginxCustom = callPackage ./nix/nginxCustom.nix {};
1919
gdbScript = callPackage ./nix/gdbScript.nix {};
2020
pythonDeps = with python3Packages; [
2121
pytest
@@ -28,8 +28,9 @@ mkShell {
2828
extAll
2929
pythonDeps
3030
format.do format.doCheck
31-
nginxScript
31+
nginxCustom.nginxScript
3232
gdbScript
33+
(pkgs.nixops_unstable_minimal.withPlugins (ps: [ ps.nixops-aws ]))
3334
];
3435
shellHook = ''
3536
export HISTFILE=.history

0 commit comments

Comments
 (0)