Commit 6c144bb

Merge pull request #66 from Roblox/local_tests
Support running integration tests in local vagrant VM.
2 parents d1c51d3 + 2c28030 commit 6c144bb

13 files changed: +99 −177 lines

README.md

Lines changed: 11 additions & 6 deletions

@@ -24,7 +24,7 @@ Docker daemon is not required on the host system.
 
 ## Requirements
 
-- [Nomad](https://www.nomadproject.io/downloads.html) >=v0.11
+- [Nomad](https://www.nomadproject.io/downloads.html) >=v1.0
 - [Go](https://golang.org/doc/install) >=v1.11
 - [Containerd](https://containerd.io/downloads/) >=1.3
 - [Vagrant](https://www.vagrantup.com/downloads.html) >=v2.2
@@ -90,14 +90,14 @@ More detailed instructions are in the [`example README.md`](https://github.com/R
 | **cwd** | string | no | Specify the current working directory for your container process. If the directory does not exist, one will be created for you. |
 | **privileged** | bool | no | Run container in privileged mode. Your container will have all linux capabilities when running in privileged mode. |
 | **host_dns** | bool | no | Default (`true`). By default, a container launched using `containerd-driver` will use host `/etc/resolv.conf`. This is similar to [`docker behavior`](https://docs.docker.com/config/containers/container-networking/#dns-services). However, if you don't want to use host DNS, you can turn off this flag by setting `host_dns=false`. |
-| **seccomp** | bool | no | Enable default seccomp profile. List of [`allowed syscalls`](https://github.com/containerd/containerd/blob/master/contrib/seccomp/seccomp_default.go#L51-L390). |
+| **seccomp** | bool | no | Enable default seccomp profile. List of [`allowed syscalls`](https://github.com/containerd/containerd/blob/master/contrib/seccomp/seccomp_default.go#L51-L395). |
 | **seccomp_profile** | string | no | Path to custom seccomp profile. `seccomp` must be set to `true` in order to use `seccomp_profile`. The default `docker` seccomp profile found [`here`](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json) can be used as a reference, and modified to create a custom seccomp profile. |
 | **readonly_rootfs** | bool | no | Container root filesystem will be read-only. |
 | **host_network** | bool | no | Enable host network. This is equivalent to `--net=host` in docker. |
 | **cap_add** | []string | no | Add individual capabilities. |
 | **cap_drop** | []string | no | Drop individual capabilities. |
 | **devices** | []string | no | A list of devices to be exposed to the container. |
-| **mounts** | []block | no | A list of mounts to be mounted in the container. Volume, bind and tmpfs type mounts are supported. fstab style [`mount options`](https://github.com/containerd/containerd/blob/master/mount/mount_linux.go#L187-L211) are supported. |
+| **mounts** | []block | no | A list of mounts to be mounted in the container. Volume, bind and tmpfs type mounts are supported. fstab style [`mount options`](https://github.com/containerd/containerd/blob/master/mount/mount_linux.go#L211-L235) are supported. |
 
 **Mount block**<br/>
 &emsp;&emsp;\{<br/>
@@ -219,12 +219,17 @@ A [`service`](https://www.nomadproject.io/docs/job-specification/service) stanza
 The service stanza instructs Nomad to register a service with Consul.
 
 ## Tests
+
+If you are running the tests locally, use the [`vagrant VM`](Vagrantfile) provided in the repository.
+
 ```
-$ make test
+$ vagrant up
+$ vagrant ssh containerd-linux
+$ sudo make test
 ```
 **NOTE**: These are destructive tests and can leave the system in a changed state.<br/>
-It is highly recommended to run these tests either as part of a CI/CD system or on
-a immutable infrastructure e.g VMs.
+It is highly recommended to run these tests either as part of a CI/CD system, e.g. CircleCI, or on
+an immutable infrastructure, e.g. Vagrant VMs.
 
 ## Cleanup
 ```

Vagrantfile

Lines changed: 11 additions & 8 deletions

@@ -6,8 +6,8 @@ VAGRANTFILE_API_VERSION = "2"
 Vagrant.configure("2") do |config|
   config.vm.define "containerd-linux"
   config.vm.box = "hashicorp/bionic64"
-  config.vm.synced_folder ".", "/home/vagrant/go/src/nomad-driver-containerd"
-  config.ssh.extra_args = ["-t", "cd /home/vagrant/go/src/nomad-driver-containerd; bash --login"]
+  config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/Roblox/nomad-driver-containerd"
+  config.ssh.extra_args = ["-t", "cd /home/vagrant/go/src/github.com/Roblox/nomad-driver-containerd; bash --login"]
   config.vm.network "forwarded_port", guest: 4646, host: 4646, host_ip: "127.0.0.1"
   config.vm.provider "virtualbox" do |vb|
     vb.name = "containerd-linux"
@@ -16,7 +16,7 @@ Vagrant.configure("2") do |config|
   end
   config.vm.provision "shell", inline: <<-SHELL
     apt-get update
-    apt-get install -y unzip gcc runc
+    apt-get install -y unzip gcc runc jq
     echo "export GOPATH=/home/vagrant/go" >> /home/vagrant/.bashrc
     echo "export PATH=$PATH:/usr/local/go/bin" >> /home/vagrant/.bashrc
     source /home/vagrant/.bashrc
@@ -29,12 +29,12 @@ Vagrant.configure("2") do |config|
       rm -f go1.14.3.linux-amd64.tar.gz
     fi
 
-    # Install nomad-0.11.3
+    # Install nomad-1.0.2
     if [ ! -f "/usr/bin/nomad" ]; then
-       wget --quiet https://releases.hashicorp.com/nomad/0.11.3/nomad_0.11.3_linux_amd64.zip
-       unzip nomad_0.11.3_linux_amd64.zip -d /usr/bin
+       wget --quiet https://releases.hashicorp.com/nomad/1.0.2/nomad_1.0.2_linux_amd64.zip
+       unzip nomad_1.0.2_linux_amd64.zip -d /usr/bin
        chmod +x /usr/bin/nomad
-       rm -f nomad_0.11.3_linux_amd64.zip
+       rm -f nomad_1.0.2_linux_amd64.zip
     fi
 
     # Install containerd-1.3.4
@@ -47,8 +47,11 @@ Vagrant.configure("2") do |config|
     # Create source directory for privileged.nomad example job.
     mkdir -p /tmp/s1
 
+    # Prepare nomad host volume
+    mkdir -p /tmp/host_volume/s1
+
     # Run setup
-    cd /home/vagrant/go/src/nomad-driver-containerd/vagrant
+    cd /home/vagrant/go/src/github.com/Roblox/nomad-driver-containerd/vagrant
     ./setup.sh
   SHELL
 end
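
Not part of the diff, but a quick post-provisioning sanity check: the versions below are the ones this Vagrantfile installs (Go 1.14.3, containerd 1.3.4, and now Nomad 1.0.2 plus `jq` for the test scripts' JSON parsing), and each tool's standard version flag can confirm them from inside the VM:

```
$ vagrant ssh containerd-linux
$ nomad version          # expect v1.0.2
$ containerd --version   # expect 1.3.4
$ go version             # expect go1.14.3
$ jq --version           # newly installed for the test scripts
```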

example/agent.hcl

Lines changed: 7 additions & 0 deletions

@@ -7,3 +7,10 @@ plugin "containerd-driver" {
     stats_interval = "5s"
   }
 }
+
+client {
+  host_volume "s1" {
+    path      = "/tmp/host_volume/s1"
+    read_only = false
+  }
+}
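
The new `client` stanza registers a host volume named `s1`, backed by `/tmp/host_volume/s1`, which the updated Vagrantfile now creates during provisioning. This is what the new `tests/005-test-volume_mount.sh` test exercises; a job would consume it through Nomad's standard `volume` and `volume_mount` stanzas.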

example/agent_tests.hcl

Lines changed: 0 additions & 16 deletions
This file was deleted.

tests/001-test-redis.sh

Lines changed: 3 additions & 21 deletions

@@ -1,5 +1,7 @@
 #!/bin/bash
 
+source $SRCDIR/utils.sh
+
 test_redis_nomad_job() {
     pushd ~/go/src/github.com/Roblox/nomad-driver-containerd/example
 
@@ -16,7 +18,7 @@ test_redis_nomad_job() {
     # The actual container process might not be running yet.
     # We need to wait for actual container to start running before trying exec.
     echo "INFO: Wait for redis container to get into RUNNING state, before trying exec."
-    is_redis_container_active
+    is_container_active redis false
 
     echo "INFO: Inspecting redis job."
     redis_status=$(nomad job inspect redis|jq -r '.Job .Status')
@@ -53,24 +55,4 @@ test_redis_nomad_job() {
     popd
 }
 
-is_redis_container_active() {
-    i="0"
-    while test $i -lt 5
-    do
-       sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING
-       if [ $? -eq 0 ]; then
-          echo "INFO: redis container is up and running"
-          break
-       fi
-       echo "INFO: redis container is down, sleep for 4 seconds."
-       sleep 4s
-       i=$[$i+1]
-    done
-
-    if [ $i -ge 5 ]; then
-       echo "ERROR: redis container didn't come up. exit 1."
-       exit 1
-    fi
-}
-
 test_redis_nomad_job
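
Each per-test `is_*_container_active` poller deleted here (and in the scripts below) is replaced by a single call to `is_container_active`, defined in `utils.sh`, one of the 13 changed files, sourced via `$SRCDIR` but not shown in this excerpt. Judging from the deleted bodies and the call sites (`is_container_active redis false` here, `is_container_active capabilities true` below), the second argument most likely toggles the extra 5-second settle delay that the capabilities, privileged and volume_mount tests used after detecting a RUNNING task. A plausible sketch of the shared helper, not the committed code:

```
# Hypothetical reconstruction of the shared helper in utils.sh.
# Usage inferred from the call sites: is_container_active <job_name> <sleep_after_running>
is_container_active() {
    local job_name=$1
    local sleep_after=$2   # "true" adds a settle delay once a task reports RUNNING

    local i=0
    while [ $i -lt 5 ]; do
        # Poll containerd (nomad namespace) for a RUNNING task.
        if sudo CONTAINERD_NAMESPACE=nomad ctr task ls | grep -q RUNNING; then
            echo "INFO: ${job_name} container is up and running"
            if [ "$sleep_after" = "true" ]; then
                sleep 5s
            fi
            return 0
        fi
        echo "INFO: ${job_name} container is down, sleep for 4 seconds."
        sleep 4s
        i=$((i + 1))
    done

    echo "ERROR: ${job_name} container didn't come up. exit 1."
    exit 1
}
```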

tests/002-test-signal-handler.sh

Lines changed: 3 additions & 21 deletions

@@ -1,5 +1,7 @@
 #!/bin/bash
 
+source $SRCDIR/utils.sh
+
 test_signal_handler_nomad_job() {
     pushd ~/go/src/github.com/Roblox/nomad-driver-containerd/example
 
@@ -24,7 +26,7 @@ test_signal_handler_nomad_job() {
     # The actual container process might not be running yet.
     # We need to wait for actual container to start running before trying to send invalid signal.
     echo "INFO: Wait for signal container to get into RUNNING state, before trying to send invalid signal."
-    is_signal_container_active
+    is_container_active signal false
 
     echo "INFO: Test invalid signal."
     alloc_id=$(nomad job status signal|awk 'END{print}'|cut -d ' ' -f 1)
@@ -55,24 +57,4 @@ cleanup() {
     rm $tmpfile > /dev/null 2>&1
 }
 
-is_signal_container_active() {
-    i="0"
-    while test $i -lt 5
-    do
-       sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING
-       if [ $? -eq 0 ]; then
-          echo "INFO: signal container is up and running"
-          break
-       fi
-       echo "INFO: signal container is down, sleep for 4 seconds."
-       sleep 4s
-       i=$[$i+1]
-    done
-
-    if [ $i -ge 5 ]; then
-       echo "ERROR: signal container didn't come up. exit 1."
-       exit 1
-    fi
-}
-
 test_signal_handler_nomad_job
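
Since every test script now starts with `source $SRCDIR/utils.sh`, running a single script by hand requires `SRCDIR` to be set first; the `make test` target presumably exports it when it drives the whole suite. A hypothetical single-test invocation inside the VM, assuming `utils.sh` lives next to the test scripts in `tests/`:

```
$ cd ~/go/src/github.com/Roblox/nomad-driver-containerd
$ export SRCDIR=$PWD/tests    # assumption: utils.sh sits alongside the test scripts
$ sudo -E bash tests/002-test-signal-handler.sh
```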

tests/003-test-capabilities.sh

Lines changed: 14 additions & 33 deletions

@@ -1,5 +1,7 @@
 #!/bin/bash
 
+source $SRCDIR/utils.sh
+
 # readonly_rootfs, cap_add and cap_drop flags are tested as part of this test.
 test_capabilities_nomad_job() {
     pushd ~/go/src/github.com/Roblox/nomad-driver-containerd/example
@@ -18,7 +20,7 @@ test_capabilities_nomad_job() {
     # The actual container process might not be running yet.
     # We need to wait for actual container to start running before trying exec.
     echo "INFO: Wait for capabilities container to get into RUNNING state, before trying exec."
-    is_capabilities_container_active
+    is_container_active capabilities true
 
     echo "INFO: Inspecting capabilities job."
     cap_status=$(nomad job inspect capabilities|jq -r '.Job .Status')
@@ -27,17 +29,6 @@ test_capabilities_nomad_job() {
        exit 1
     fi
 
-    # Check if readonly_rootfs is set to true.
-    echo "INFO: Checking if readonly_rootfs is set to true."
-    local outfile=$(mktemp /tmp/capabilities.XXXXXX)
-    nomad alloc exec -job capabilities touch /tmp/file.txt >> $outfile 2>&1
-    if ! grep -q "Read-only file system" $outfile; then
-       echo "ERROR: readonly_rootfs is not set to true."
-       cleanup "$outfile"
-       exit 1
-    fi
-    cleanup "$outfile"
-
     # Check if CAP_SYS_ADMIN was added.
     echo "INFO: Checking if CAP_SYS_ADMIN is added."
     nomad alloc exec -job capabilities capsh --print|grep cap_sys_admin >/dev/null 2>&1
@@ -56,6 +47,17 @@ test_capabilities_nomad_job() {
        exit 1
     fi
 
+    # Check if readonly_rootfs is set to true.
+    echo "INFO: Checking if readonly_rootfs is set to true."
+    local outfile=$(mktemp /tmp/capabilities.XXXXXX)
+    nomad alloc exec -job capabilities touch /tmp/file.txt >> $outfile 2>&1
+    if ! grep -q "Read-only file system" $outfile; then
+       echo "ERROR: readonly_rootfs is not set to true."
+       cleanup "$outfile"
+       exit 1
+    fi
+    cleanup "$outfile"
+
     echo "INFO: Stopping nomad capabilities job."
     nomad job stop capabilities
     cap_status=$(nomad job status -short capabilities|grep Status|awk '{split($0,a,"="); print a[2]}'|tr -d ' ')
@@ -74,25 +76,4 @@ cleanup() {
     rm $tmpfile > /dev/null 2>&1
 }
 
-is_capabilities_container_active() {
-    i="0"
-    while test $i -lt 5
-    do
-       sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING
-       if [ $? -eq 0 ]; then
-          echo "INFO: capabilities container is up and running"
-          sleep 5s
-          break
-       fi
-       echo "INFO: capabilities container is down, sleep for 4 seconds."
-       sleep 4s
-       i=$[$i+1]
-    done
-
-    if [ $i -ge 5 ]; then
-       echo "ERROR: capabilities container didn't come up. exit 1."
-       exit 1
-    fi
-}
-
 test_capabilities_nomad_job

tests/004-test-privileged.sh

Lines changed: 3 additions & 22 deletions

@@ -1,5 +1,7 @@
 #!/bin/bash
 
+source $SRCDIR/utils.sh
+
 # privileged mode, devices and mounts are tested as part of this test.
 test_privileged_nomad_job() {
     pushd ~/go/src/github.com/Roblox/nomad-driver-containerd/example
@@ -20,7 +22,7 @@ test_privileged_nomad_job() {
     # The actual container process might not be running yet.
     # We need to wait for actual container to start running before trying exec.
     echo "INFO: Wait for privileged container to get into RUNNING state, before trying exec."
-    is_privileged_container_active
+    is_container_active privileged true
 
     echo "INFO: Inspecting privileged job."
     job_status=$(nomad job inspect privileged|jq -r '.Job .Status')
@@ -73,25 +75,4 @@ setup_bind_source() {
     echo hello > /tmp/s1/bind.txt
 }
 
-is_privileged_container_active() {
-    i="0"
-    while test $i -lt 5
-    do
-       sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING
-       if [ $? -eq 0 ]; then
-          echo "INFO: privileged container is up and running"
-          sleep 5s
-          break
-       fi
-       echo "INFO: privileged container is down, sleep for 4 seconds."
-       sleep 4s
-       i=$[$i+1]
-    done
-
-    if [ $i -ge 5 ]; then
-       echo "ERROR: privileged container didn't come up. exit 1."
-       exit 1
-    fi
-}
-
 test_privileged_nomad_job

tests/005-test-volume_mount.sh

Lines changed: 3 additions & 22 deletions

@@ -1,5 +1,7 @@
 #!/bin/bash
 
+source $SRCDIR/utils.sh
+
 job_name=volume_mount
 host_volume_path=/tmp/host_volume/s1
 
@@ -16,7 +18,7 @@ test_volume_mount_nomad_job() {
     # The actual container process might not be running yet.
     # We need to wait for actual container to start running before trying exec.
     echo "INFO: Wait for ${job_name} container to get into RUNNING state, before trying exec."
-    is_${job_name}_container_active
+    is_container_active ${job_name} true
 
     echo "INFO: Checking status of $job_name job."
     job_status=$(nomad job status -short $job_name|grep Status|awk '{split($0,a,"="); print a[2]}'|tr -d ' ')
@@ -71,25 +73,4 @@ setup_bind_source() {
     echo hello > ${host_volume_path}/bind.txt
 }
 
-is_volume_mount_container_active() {
-    i="0"
-    while test $i -lt 5
-    do
-       sudo CONTAINERD_NAMESPACE=nomad ctr task ls|grep -q RUNNING
-       if [ $? -eq 0 ]; then
-          echo "INFO: ${job_name} container is up and running"
-          sleep 5s
-          break
-       fi
-       echo "INFO: ${job_name} container is down, sleep for 4 seconds."
-       sleep 4s
-       i=$[$i+1]
-    done
-
-    if [ $i -ge 5 ]; then
-       echo "ERROR: ${job_name} container didn't come up. exit 1."
-       exit 1
-    fi
-}
-
 test_volume_mount_nomad_job
