Skip to content

Commit 5b9627f

Browse files
andre-j3sus and leokondrashov
authored and committed
Add tests for stargz snapshotter
Fix LinkCheck job and fix dead links. Signed-off-by: André Jesus <[email protected]>
1 parent 6e036f9 commit 5b9627f

File tree

13 files changed

+79
-29
lines changed

13 files changed

+79
-29
lines changed

.github/workflows/linters.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ jobs:
4040
runs-on: ubuntu-24.04
4141
steps:
4242
- uses: actions/checkout@v4
43-
- uses: gaurav-nelson/github-action-markdown-link-check@v1
43+
- uses: tcort/github-action-markdown-link-check@v1
4444
with:
4545
use-quiet-mode: 'yes'
4646
config-file: 'configs/.linkcheck.json'

.github/workflows/unit_tests.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,10 +145,13 @@ jobs:
145145
146146
- name: Build setup scripts
147147
run: pushd scripts && go build -o setup_tool && popd
148-
148+
149149
- name: Pull binaries
150150
run: ./scripts/setup_tool setup_firecracker_containerd
151151

152+
- name: Setup Stargz
153+
run: ./scripts/setup_tool setup_stargz firecracker
154+
152155
- name: Build
153156
run: go build -race -v -a ./...
154157

configs/knative_workloads/helloworld-stargz.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,4 +14,4 @@ spec:
1414
- name: GUEST_PORT # Port on which the firecracker-containerd container is accepting requests
1515
value: "50051"
1616
- name: GUEST_IMAGE # Container image to use for firecracker-containerd container
17-
value: "ghcr.io/andre-j3sus/helloworld:var_workload-esgz"
17+
value: "ghcr.io/vhive-serverless/helloworld:var_workload-esgz"

ctriface/Makefile

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,10 +28,18 @@ BENCHFILES:=bench_test.go iface.go orch_options.go orch.go
2828
# WITHLAZY:=-lazy
2929
WITHUPF:=
3030
WITHLAZY:=
31+
STARGZ:=-ss 'proxy'
32+
STARGZ_IMAGE:=-img 'ghcr.io/vhive-serverless/helloworld:var_workload-esgz'
33+
DOCKER_CREDENTIALS:=-dockerCredentials '{"docker-credentials":{"ghcr.io":{"username":"","password":""}}}'
3134
GOBENCH:=-v -timeout 1500s
3235
CTRDLOGDIR:=/tmp/ctrd-logs
3336

3437
test:
38+
./../scripts/clean_fcctr.sh
39+
sudo env "PATH=$(PATH)" /usr/local/bin/http-address-resolver &
40+
sudo env "PATH=$(PATH)" /bin/bash -c 'while true; do /usr/local/bin/demux-snapshotter; done' &
41+
sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log.out 2>$(CTRDLOGDIR)/ctriface_log.err &
42+
sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS) -args $(STARGZ) $(DOCKER_CREDENTIALS) $(STARGZ_IMAGE)
3543
./../scripts/clean_fcctr.sh
3644
sudo mkdir -m777 -p $(CTRDLOGDIR) && sudo env "PATH=$(PATH)" /usr/local/bin/firecracker-containerd --config /etc/firecracker-containerd/config.toml 1>$(CTRDLOGDIR)/ctriface_log.out 2>$(CTRDLOGDIR)/ctriface_log.err &
3745
sudo env "PATH=$(PATH)" go test $(EXTRATESTFILES) $(EXTRAGOARGS)

ctriface/iface_test.go

Lines changed: 38 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,10 @@ var (
4242
isUPFEnabled = flag.Bool("upf", false, "Set UPF enabled")
4343
isLazyMode = flag.Bool("lazy", false, "Set lazy serving on or off")
4444
//nolint:deadcode,unused,varcheck
45-
isWithCache = flag.Bool("withCache", false, "Do not drop the cache before measurements")
45+
isWithCache = flag.Bool("withCache", false, "Do not drop the cache before measurements")
46+
snapshotter = flag.String("ss", "devmapper", "Snapshotter to use")
47+
dockerCredentials = flag.String("dockerCredentials", "", "Docker credentials for pulling images from inside a microVM")
48+
testImage = flag.String("img", testImageName, "Test image")
4649
)
4750

4851
func TestMain(m *testing.M) {
@@ -71,18 +74,23 @@ func TestStartSnapStopLoad(t *testing.T) {
7174
ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), namespaceName), testTimeout)
7275
defer cancel()
7376

74-
orch := NewOrchestrator("devmapper", "", WithTestModeOn(true))
77+
orch := NewOrchestrator(
78+
*snapshotter,
79+
"",
80+
WithTestModeOn(true),
81+
WithDockerCredentials(*dockerCredentials),
82+
)
7583

7684
vmID := "2"
7785
revision := "myrev-2"
7886

79-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
87+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
8088
require.NoError(t, err, "Failed to start VM")
8189

8290
err = orch.PauseVM(ctx, vmID)
8391
require.NoError(t, err, "Failed to pause VM")
8492

85-
snap := snapshotting.NewSnapshot(revision, "/fccd/snapshots", testImageName)
93+
snap := snapshotting.NewSnapshot(revision, "/fccd/snapshots", *testImage)
8694
err = snap.CreateSnapDir()
8795
require.NoError(t, err, "Failed to create snapshots directory")
8896

@@ -121,23 +129,24 @@ func TestPauseSnapResume(t *testing.T) {
121129
defer cancel()
122130

123131
orch := NewOrchestrator(
124-
"devmapper",
132+
*snapshotter,
125133
"",
126134
WithTestModeOn(true),
127135
WithUPF(*isUPFEnabled),
128136
WithLazyMode(*isLazyMode),
137+
WithDockerCredentials(*dockerCredentials),
129138
)
130139

131140
vmID := "4"
132141
revision := "myrev-4"
133142

134-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
143+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
135144
require.NoError(t, err, "Failed to start VM")
136145

137146
err = orch.PauseVM(ctx, vmID)
138147
require.NoError(t, err, "Failed to pause VM")
139148

140-
snap := snapshotting.NewSnapshot(revision, "/fccd/snapshots", testImageName)
149+
snap := snapshotting.NewSnapshot(revision, "/fccd/snapshots", *testImage)
141150
err = snap.CreateSnapDir()
142151
require.NoError(t, err, "Failed to create snapshots directory")
143152

@@ -170,16 +179,17 @@ func TestStartStopSerial(t *testing.T) {
170179
defer cancel()
171180

172181
orch := NewOrchestrator(
173-
"devmapper",
182+
*snapshotter,
174183
"",
175184
WithTestModeOn(true),
176185
WithUPF(*isUPFEnabled),
177186
WithLazyMode(*isLazyMode),
187+
WithDockerCredentials(*dockerCredentials),
178188
)
179189

180190
vmID := "5"
181191

182-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
192+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
183193
require.NoError(t, err, "Failed to start VM")
184194

185195
err = orch.StopSingleVM(ctx, vmID)
@@ -204,16 +214,17 @@ func TestPauseResumeSerial(t *testing.T) {
204214
defer cancel()
205215

206216
orch := NewOrchestrator(
207-
"devmapper",
217+
*snapshotter,
208218
"",
209219
WithTestModeOn(true),
210220
WithUPF(*isUPFEnabled),
211221
WithLazyMode(*isLazyMode),
222+
WithDockerCredentials(*dockerCredentials),
212223
)
213224

214225
vmID := "6"
215226

216-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
227+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
217228
require.NoError(t, err, "Failed to start VM")
218229

219230
err = orch.PauseVM(ctx, vmID)
@@ -247,16 +258,19 @@ func TestStartStopParallel(t *testing.T) {
247258
vmIDBase := 7
248259

249260
orch := NewOrchestrator(
250-
"devmapper",
261+
*snapshotter,
251262
"",
252263
WithTestModeOn(true),
253264
WithUPF(*isUPFEnabled),
254265
WithLazyMode(*isLazyMode),
266+
WithDockerCredentials(*dockerCredentials),
255267
)
256268

257-
// Pull image
258-
_, err := orch.getImage(ctx, testImageName)
259-
require.NoError(t, err, "Failed to pull image "+testImageName)
269+
if *snapshotter != "proxy" {
270+
// Pull image (with remote snapshotters you can't pull the image before starting the VM)
271+
_, err := orch.getImage(ctx, *testImage)
272+
require.NoError(t, err, "Failed to pull image "+*testImage)
273+
}
260274

261275
{
262276
var vmGroup sync.WaitGroup
@@ -265,7 +279,7 @@ func TestStartStopParallel(t *testing.T) {
265279
go func(i int) {
266280
defer vmGroup.Done()
267281
vmID := fmt.Sprintf("%d", i)
268-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
282+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
269283
require.NoError(t, err, "Failed to start VM "+vmID)
270284
}(i)
271285
}
@@ -308,16 +322,19 @@ func TestPauseResumeParallel(t *testing.T) {
308322
vmIDBase := 17
309323

310324
orch := NewOrchestrator(
311-
"devmapper",
325+
*snapshotter,
312326
"",
313327
WithTestModeOn(true),
314328
WithUPF(*isUPFEnabled),
315329
WithLazyMode(*isLazyMode),
330+
WithDockerCredentials(*dockerCredentials),
316331
)
317332

318-
// Pull image
319-
_, err := orch.getImage(ctx, testImageName)
320-
require.NoError(t, err, "Failed to pull image "+testImageName)
333+
if *snapshotter != "proxy" {
334+
// Pull image (with remote snapshotters you can't pull the image before starting the VM)
335+
_, err := orch.getImage(ctx, *testImage)
336+
require.NoError(t, err, "Failed to pull image "+*testImage)
337+
}
321338

322339
{
323340
var vmGroup sync.WaitGroup
@@ -326,7 +343,7 @@ func TestPauseResumeParallel(t *testing.T) {
326343
go func(i int) {
327344
defer vmGroup.Done()
328345
vmID := fmt.Sprintf("%d", i)
329-
_, _, err := orch.StartVM(ctx, vmID, testImageName)
346+
_, _, err := orch.StartVM(ctx, vmID, *testImage)
330347
require.NoError(t, err, "Failed to start VM")
331348
}(i)
332349
}

ctriface/image/manager_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ import (
3939

4040
const (
4141
TestImageName = "ghcr.io/ease-lab/helloworld:var_workload"
42-
StargzImageName = "ghcr.io/andre-j3sus/helloworld:var_workload-esgz"
42+
StargzImageName = "ghcr.io/vhive-serverless/helloworld:var_workload-esgz"
4343
containerdAddress = "/run/firecracker-containerd/containerd.sock"
4444
NamespaceName = "containerd"
4545
)

docs/profiling.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ in a highly multi-tenant environment. The tool relies on
66
that arise when colocating VMs on a single host. The tool issues requests to VMs in Round-Robin
77
and collects various high-level and low-level metrics, including requests-per-second (RPS)
88
per core, tail latency, hardware counters, plotting the collected metrics in a set of charts.
9-
An example chart is shown in the [section](###TestProfileIncrementConfiguration-function).
9+
An example chart is shown in the [section](#testprofileincrementconfiguration-function).
1010

1111
## Methodology
1212

power_manager/util.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,4 +32,4 @@ func SetPowerProfileToNode(powerprofileName string, nodeName string, minFreq int
3232
return err
3333
}
3434
return nil
35-
}
35+
}

scripts/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ To use other vHive repos locally, provide the `--vhive-repo-dir` option to speci
5959
./setup_tool --vhive-repo-dir <VHIVE REPO PATH> ...
6060
```
6161

62-
If the current directory or the provided path is not a valid vHive repo, the setup_tool will [automatically clone the remote vHive repo and use it](#34-use-with-remote-vhive-repo).
62+
If the current directory or the provided path is not a valid vHive repo, the setup_tool will [automatically clone the remote vHive repo and use it](#34-use-with-remote-vhive-repo-standalone-use).
6363

6464
### 3.4 Use with Remote vHive Repo (Standalone Use)
6565
When the setup_tool is directly downloaded or targeted for standalone use, the setup_tool will automatically clone the remote vHive repo to the temporary directory and then use it during the setup process.

scripts/clean_fcctr.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
2727
echo Killing firecracker agents and VMs
2828
sudo pkill -9 firec
2929
sudo pkill -9 containerd
30+
sudo pkill -f 'while true; do /usr/local/bin/demux-snapshotter'
31+
sudo pkill -f http-address-resolver
3032

3133
echo Resetting nftables
3234
sudo nft flush table ip filter
@@ -73,6 +75,9 @@ sudo rm -rf /run/firecracker-containerd/containerd.sock.ttrpc \
7375
/run/firecracker-containerd/io.containerd.runtime.v2.task \
7476
/run/containerd/*
7577

78+
echo Cleaning /var/lib/demux-snapshotter/*
79+
sudo rm -rf /var/lib/demux-snapshotter/snapshotter.sock
80+
7681
echo Cleaning CNI state, e.g., allocated addresses
7782
sudo rm /var/lib/cni/networks/fcnet*/last_reserved_ip.0 || echo clean already
7883
sudo rm /var/lib/cni/networks/fcnet*/19* || echo clean already

0 commit comments

Comments (0)