diff --git a/.buildkite/scripts/buildkite-k8s-integration-tests.sh b/.buildkite/scripts/buildkite-k8s-integration-tests.sh index 892ba110ad7..93054278d14 100755 --- a/.buildkite/scripts/buildkite-k8s-integration-tests.sh +++ b/.buildkite/scripts/buildkite-k8s-integration-tests.sh @@ -95,10 +95,14 @@ EOF pod_logs_base="${PWD}/build/${fully_qualified_group_name}.pod_logs_dump" set +e - K8S_TESTS_POD_LOGS_BASE="${pod_logs_base}" AGENT_IMAGE="${image}" DOCKER_VARIANT="${variant}" gotestsum --hide-summary=skipped --format testname --no-color -f standard-quiet --junitfile-hide-skipped-tests --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags kubernetes,integration -test.shuffle on -test.timeout 2h0m0s github.com/elastic/elastic-agent/testing/integration -v -args -integration.groups="${group_name}" -integration.sudo="false" + K8S_TESTS_POD_LOGS_BASE="${pod_logs_base}" AGENT_IMAGE="${image}" DOCKER_VARIANT="${variant}" gotestsum --hide-summary=skipped --format testname --no-color -f standard-quiet --junitfile-hide-skipped-tests --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags kubernetes,integration -test.shuffle on -test.timeout 2h0m0s github.com/elastic/elastic-agent/testing/integration/k8s -v -args -integration.groups="${group_name}" -integration.sudo="false" exit_status=$? set -e + if [[ $exit_status -ne 0 ]]; then + echo "^^^ +++" + fi + if [[ $TESTS_EXIT_STATUS -eq 0 && $exit_status -ne 0 ]]; then TESTS_EXIT_STATUS=$exit_status fi diff --git a/.buildkite/scripts/steps/k8s-extended-tests.sh b/.buildkite/scripts/steps/k8s-extended-tests.sh index 8eba5ea667f..85064ebbab9 100755 --- a/.buildkite/scripts/steps/k8s-extended-tests.sh +++ b/.buildkite/scripts/steps/k8s-extended-tests.sh @@ -26,7 +26,7 @@ else fi SNAPSHOT=true EXTERNAL=true PACKAGES=docker mage -v package -TEST_INTEG_CLEAN_ON_EXIT=true INSTANCE_PROVISIONER=kind STACK_PROVISIONER=stateful SNAPSHOT=true mage integration:kubernetesMatrix +TEST_INTEG_CLEAN_ON_EXIT=true INSTANCE_PROVISIONER=kind STACK_PROVISIONER=stateful SNAPSHOT=true mage integration:testKubernetesMatrix TESTS_EXIT_STATUS=$? set -e diff --git a/NOTICE.txt b/NOTICE.txt index 757d88b9960..b1f913b2cc7 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -83258,11 +83258,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/structured-merge-diff/v4 -Version: v4.4.2 +Version: v4.4.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.4.2/LICENSE: +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.4.3/LICENSE: Apache License Version 2.0, January 2004 diff --git a/docs/test-framework-dev-guide.md b/docs/test-framework-dev-guide.md index dd239643ce8..5f1eed35cda 100644 --- a/docs/test-framework-dev-guide.md +++ b/docs/test-framework-dev-guide.md @@ -76,9 +76,9 @@ The test are run with mage using the `integration` namespace: - `mage integration:matrix` to run all tests on the complete matrix of supported operating systems and architectures of the Elastic Agent. -- `mage integration:kubernetes` to run kubernetes tests for the default image on the default version of kubernetes (all previous commands will not run any kubernetes tests). 
+- `mage integration:testKubernetes` to run kubernetes tests for the default image on the default version of kubernetes (note that none of the previous commands run any kubernetes tests). -- `mage integration:kubernetesMatrix` to run a matrix of kubernetes tests for all image types and supported versions of kubernetes. +- `mage integration:testKubernetesMatrix` to run a matrix of kubernetes tests for all image types and supported versions of kubernetes. #### Selecting a specific platform @@ -94,7 +94,7 @@ between, and it can be very specific or not very specific. - `TEST_PLATFORMS="linux/amd64/ubuntu/20.04" mage integration:test` to execute tests only on Ubuntu 20.04 AMD64. - `TEST_PLATFORMS="windows/amd64/2022" mage integration:test` to execute tests only on Windows Server 2022. - `TEST_PLATFORMS="linux/amd64 windows/amd64/2022" mage integration:test` to execute tests on Linux AMD64 and Windows Server 2022. -- `TEST_PLATFORMS="kubernetes/arm64/1.31.0/wolfi" mage integration:kubernetes` to execute kubernetes tests on Kubernetes version 1.31.0 with wolfi docker variant. +- `INSTANCE_PROVISIONER="kind" TEST_PLATFORMS="kubernetes/arm64/1.33.0/wolfi" mage integration:testKubernetes` to execute kubernetes tests on Kubernetes version 1.33.0 with the wolfi docker variant on a kind cluster. > **_NOTE:_** This only filters down the tests based on the platform. It will not execute a test on a platform unless > the test defines it as supported. @@ -417,7 +417,7 @@ not cause already provisioned resources to be replaced with an instance created ### Kind Instance Provisioner Use only when running Kubernetes tests. Uses locally installed kind to create Kubernetes clusters on the fly. -- `INSTANCE_PROVISIONER="kind" mage integration:kubernetes` +- `INSTANCE_PROVISIONER="kind" mage integration:testKubernetes` ## Troubleshooting Tips diff --git a/go.mod b/go.mod index 6352ffab7f6..a01605ff905 100644 --- a/go.mod +++ b/go.mod @@ -98,8 +98,7 @@ require ( k8s.io/client-go v0.32.3 kernel.org/pub/linux/libs/security/libcap/cap v1.2.70 sigs.k8s.io/e2e-framework v0.4.0 - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 + sigs.k8s.io/kustomize/api v0.18.0 ) require ( @@ -384,7 +383,6 @@ require ( github.com/huandu/xstrings v1.5.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/icholy/digest v0.1.22 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ionos-cloud/sdk-go/v6 v6.2.1 // indirect github.com/jaegertracing/jaeger v1.66.0 // indirect @@ -621,7 +619,6 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect - go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect @@ -646,7 +643,8 @@ require ( oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/controller-runtime v0.20.4 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 8761df050a6..e1b57c86d53 100644 --- a/go.sum +++ b/go.sum @@ -879,8 +879,6 @@ github.com/ianlancetaylor/demangle
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/icholy/digest v0.1.22 h1:dRIwCjtAcXch57ei+F0HSb5hmprL873+q7PoVojdMzM= github.com/icholy/digest v0.1.22/go.mod h1:uLAeDdWKIWNFMH0wqbwchbTQOmJWhzSnL7zmqSPqEEc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= @@ -1791,8 +1789,6 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -2039,7 +2035,6 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= @@ -2330,14 +2325,14 @@ sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod 
h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3 h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/magefile.go b/magefile.go index c5907b18299..efd29000121 100644 --- a/magefile.go +++ b/magefile.go @@ -1993,6 +1993,7 @@ func (Integration) Check() error { define.ValidateDir("testing/integration"), define.ValidateDir("testing/integration/serverless"), define.ValidateDir("testing/integration/leak"), + define.ValidateDir("testing/integration/k8s"), ) } @@ -2073,34 +2074,34 @@ func (Integration) TestServerless(ctx context.Context) error { return integRunner(ctx, "testing/integration/serverless", false, "") } -// Kubernetes runs kubernetes integration tests -func (Integration) Kubernetes(ctx context.Context) error { +// TestKubernetes runs kubernetes integration tests +func (Integration) TestKubernetes(ctx context.Context) error { // invoke integration tests if err := os.Setenv("TEST_GROUPS", "kubernetes"); err != nil { return err } - return integRunner(ctx, "testing/integration", false, "") + return integRunner(ctx, "testing/integration/k8s", false, "") } -// KubernetesSingle runs a single Kubernetes integration test -func (Integration) KubernetesSingle(ctx context.Context, testName string) error { +// TestKubernetesSingle runs a single kubernetes integration test +func (Integration) TestKubernetesSingle(ctx context.Context, testName string) error { // invoke integration tests if err := os.Setenv("TEST_GROUPS", "kubernetes"); err != nil { return err } - return integRunner(ctx, "testing/integration", false, testName) + return integRunner(ctx, "testing/integration/k8s", false, testName) } -// KubernetesMatrix runs a matrix of kubernetes integration tests -func (Integration) KubernetesMatrix(ctx context.Context) error { +// TestKubernetesMatrix runs a matrix of kubernetes integration tests +func (Integration) TestKubernetesMatrix(ctx context.Context) error { // invoke integration tests if err := os.Setenv("TEST_GROUPS", "kubernetes"); err != nil { return err } - return integRunner(ctx, "testing/integration", true, "") + return integRunner(ctx, "testing/integration/k8s", true, "") } // UpdateVersions runs an update on the `.agent-versions.yml` fetching diff --git a/pkg/testing/define/define.go b/pkg/testing/define/define.go index 989f12d2feb..cc1f662634a 100644 --- a/pkg/testing/define/define.go +++ b/pkg/testing/define/define.go @@ -37,6 +37,7 @@ var osInfo *types.OSInfo var osInfoErr error var osInfoOnce sync.Once var noSpecialCharsRegexp = regexp.MustCompile("[^a-zA-Z0-9]+") +var kubernetesSupported = false // Require defines what this test requires for it to be run by the test runner.
// @@ -46,6 +47,12 @@ func Require(t *testing.T, req Requirements) *Info { return defineAction(t, req) } +// SetKubernetesSupported sets the kubernetesSupported flag to true +// to allow kubernetes tests to be run. +func SetKubernetesSupported() { + kubernetesSupported = true +} + type Info struct { // ESClient is the elasticsearch client to communicate with elasticsearch. // This is only present if you say a cloud is required in the `define.Require`. @@ -139,7 +146,7 @@ func findProjectRoot() (string, error) { } } -func runOrSkip(t *testing.T, req Requirements, local bool, kubernetes bool) *Info { +func runOrSkip(t *testing.T, req Requirements, local bool) *Info { // always validate requirement is valid if err := req.Validate(); err != nil { panic(fmt.Sprintf("test %s has invalid requirements: %s", t.Name(), err)) @@ -165,7 +172,7 @@ func runOrSkip(t *testing.T, req Requirements, local bool, kubernetes bool) *Inf return nil } for _, o := range req.OS { - if o.Type == Kubernetes && !kubernetes { + if o.Type == Kubernetes && !kubernetesSupported { t.Skip("test requires kubernetes") return nil } diff --git a/pkg/testing/define/define_all.go b/pkg/testing/define/define_all.go index 53aa5c02acc..febc52786d1 100644 --- a/pkg/testing/define/define_all.go +++ b/pkg/testing/define/define_all.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -//go:build !define && !local && !kubernetes +//go:build !define && !local package define @@ -11,5 +11,5 @@ import ( ) func defineAction(t *testing.T, req Requirements) *Info { - return runOrSkip(t, req, false, false) + return runOrSkip(t, req, false) } diff --git a/pkg/testing/define/define_kubernetes.go b/pkg/testing/define/define_kubernetes.go deleted file mode 100644 index cf39e7f20a6..00000000000 --- a/pkg/testing/define/define_kubernetes.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License 2.0; -// you may not use this file except in compliance with the Elastic License 2.0. - -//go:build kubernetes && !define && !local - -package define - -import ( - "testing" -) - -func defineAction(t *testing.T, req Requirements) *Info { - return runOrSkip(t, req, false, true) -} diff --git a/pkg/testing/define/define_local.go b/pkg/testing/define/define_local.go index 5ae211b8e7e..270b7000281 100644 --- a/pkg/testing/define/define_local.go +++ b/pkg/testing/define/define_local.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -//go:build local && !define && !kubernetes +//go:build local && !define package define @@ -11,5 +11,5 @@ import ( ) func defineAction(t *testing.T, req Requirements) *Info { - return runOrSkip(t, req, true, false) + return runOrSkip(t, req, true) } diff --git a/testing/integration/common.go b/testing/integration/common.go new file mode 100644 index 00000000000..4874be40549 --- /dev/null +++ b/testing/integration/common.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +//go:build integration + +package integration + +import ( + "fmt" + "net/url" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/testing/estools" +) + +// GetESHost returns the Elasticsearch host from the ELASTICSEARCH_HOST environment variable, appending the default HTTPS port 443 when the URL does not specify one +func GetESHost() (string, error) { + fixedESHost := os.Getenv("ELASTICSEARCH_HOST") + parsedES, err := url.Parse(fixedESHost) + if err != nil { + return "", err + } + if parsedES.Port() == "" { + fixedESHost = fmt.Sprintf("%s:443", fixedESHost) + } + return fixedESHost, nil +} + +// FindESDocs runs `findFn` until at least one document is returned and there is no error +func FindESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools.Documents { + var docs estools.Documents + require.Eventually( + t, + func() bool { + var err error + docs, err = findFn() + if err != nil { + t.Logf("got an error querying ES, retrying. Error: %s", err) + return false + } + + return docs.Hits.Total.Value != 0 + }, + 3*time.Minute, + 15*time.Second, + ) + + return docs +} diff --git a/testing/integration/k8s/common.go b/testing/integration/k8s/common.go new file mode 100644 index 00000000000..65667ed2aa5 --- /dev/null +++ b/testing/integration/k8s/common.go @@ -0,0 +1,587 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build integration + +package k8s + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/gofrs/uuid/v5" + "github.com/stretchr/testify/require" + helmKube "helm.sh/helm/v3/pkg/kube" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + cliResource "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/kustomize/kyaml/filesys" + + "github.com/elastic/elastic-agent-libs/kibana" + "github.com/elastic/elastic-agent-libs/testing/estools" + aclient "github.com/elastic/elastic-agent/pkg/control/v2/client" + atesting "github.com/elastic/elastic-agent/pkg/testing" + "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools" + "github.com/elastic/elastic-agent/testing/integration" + "github.com/elastic/go-elasticsearch/v8" +) + +var noSpecialCharsRegexp = regexp.MustCompile("[^a-zA-Z0-9]+") + +// k8sContext contains all the information needed to run a k8s test +type k8sContext struct { + client klient.Client + clientSet *kubernetes.Clientset + // logsBasePath is the path that will be used to store the pod logs in case a test fails + logsBasePath string + // agentImage is the full image of elastic-agent to use in the test + agentImage string + // agentImageRepo is the repository of elastic-agent image to use in the test + agentImageRepo string + // agentImageTag is the tag of elastic-agent image to use in the test + agentImageTag string + // esHost is the host of the elasticsearch to use in the test + esHost string + // esAPIKey is the API key of the elasticsearch to use in the test + esAPIKey string + // esEncodedAPIKey is the encoded API key of the elasticsearch to use in the test
esEncodedAPIKey string + // enrollParams contains the information needed to enroll an agent with Fleet in the test + enrollParams *fleettools.EnrollParams + // createdAt is the time when the k8sContext was created + createdAt time.Time +} + +// getNamespace returns the namespace for the current test. +// If K8S_TESTS_NAMESPACE is set, its value is returned, +// otherwise a unique namespace is generated. +func (k k8sContext) getNamespace(t *testing.T) string { + if ns := os.Getenv("K8S_TESTS_NAMESPACE"); ns != "" { + return ns + } + + nsUUID, err := uuid.NewV4() + if err != nil { + t.Fatalf("error generating namespace UUID: %v", err) + } + hasher := sha256.New() + hasher.Write([]byte(nsUUID.String())) + testNamespace := strings.ToLower(base64.URLEncoding.EncodeToString(hasher.Sum(nil))) + return noSpecialCharsRegexp.ReplaceAllString(testNamespace, "") +} + +// k8sGetContext performs all the necessary checks to get a k8sContext for the current test +func k8sGetContext(t *testing.T, info *define.Info) k8sContext { + agentImage := os.Getenv("AGENT_IMAGE") + require.NotEmpty(t, agentImage, "AGENT_IMAGE must be set") + + agentImageParts := strings.SplitN(agentImage, ":", 2) + require.Len(t, agentImageParts, 2, "AGENT_IMAGE must be in the form '<repo>:<tag>'") + agentImageRepo := agentImageParts[0] + agentImageTag := agentImageParts[1] + + client, err := info.KubeClient() + require.NoError(t, err) + require.NotNil(t, client) + + clientSet, err := kubernetes.NewForConfig(client.RESTConfig()) + require.NoError(t, err) + require.NotNil(t, clientSet) + + testLogsBasePath := os.Getenv("K8S_TESTS_POD_LOGS_BASE") + require.NotEmpty(t, testLogsBasePath, "K8S_TESTS_POD_LOGS_BASE must be set") + + err = os.MkdirAll(testLogsBasePath, 0o755) + require.NoError(t, err, "failed to create test logs directory") + + esHost, err := integration.GetESHost() + require.NoError(t, err, "cannot parse ELASTICSEARCH_HOST") + + esAPIKey, err := generateESAPIKey(info.ESClient, info.Namespace) + require.NoError(t, err, "failed to generate ES API key") + require.NotEmpty(t, esAPIKey, "failed to generate ES API key") + + enrollParams, err := fleettools.NewEnrollParams(context.Background(), info.KibanaClient) + require.NoError(t, err, "failed to create fleet enroll params") + + return k8sContext{ + client: client, + clientSet: clientSet, + agentImage: agentImage, + agentImageRepo: agentImageRepo, + agentImageTag: agentImageTag, + logsBasePath: testLogsBasePath, + esHost: esHost, + esAPIKey: esAPIKey.APIKey, + esEncodedAPIKey: esAPIKey.Encoded, + enrollParams: enrollParams, + createdAt: time.Now(), + } +} + +// generateESAPIKey generates an API key for the given Elasticsearch. +func generateESAPIKey(esClient *elasticsearch.Client, keyName string) (estools.APIKeyResponse, error) { + return estools.CreateAPIKey(context.Background(), esClient, estools.APIKeyRequest{Name: keyName, Expiration: "1d"}) +} + +// int64Ptr returns a pointer to the given int64 +func int64Ptr(val int64) *int64 { + valPtr := val + return &valPtr +} + +// k8sCheckAgentStatus checks that the agent reports healthy.
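+// It polls the agent by exec-ing `elastic-agent status --output=json` in the given container every 100ms for up to two minutes, verifies that every component in componentPresence is healthy when expected to be present (and absent otherwise), and aborts immediately if the container has restarted.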
+func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer, + namespace string, agentPodName string, containerName string, componentPresence map[string]bool, +) error { + command := []string{"elastic-agent", "status", "--output=json"} + stopCheck := errors.New("stop check") + + // we will wait maximum 120 seconds for the agent to report healthy + ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) + defer cancel() + + checkStatus := func() error { + pod := corev1.Pod{} + if err := client.Resources(namespace).Get(ctx, agentPodName, namespace, &pod); err != nil { + return err + } + + for _, container := range pod.Status.ContainerStatuses { + if container.Name != containerName { + continue + } + + if restarts := container.RestartCount; restarts != 0 { + return fmt.Errorf("container %q of pod %q has restarted %d times: %w", containerName, agentPodName, restarts, stopCheck) + } + } + + status := atesting.AgentStatusOutput{} // clear status output + stdout.Reset() + stderr.Reset() + if err := client.Resources().ExecInPod(ctx, namespace, agentPodName, containerName, command, stdout, stderr); err != nil { + return err + } + + if err := json.Unmarshal(stdout.Bytes(), &status); err != nil { + return err + } + + var err error + // validate that the components defined are also healthy if they should exist + for component, shouldBePresent := range componentPresence { + compState, ok := getAgentComponentState(status, component) + if shouldBePresent { + if !ok { + // doesn't exist + err = errors.Join(err, fmt.Errorf("required component %s not found", component)) + } else if compState != int(aclient.Healthy) { + // not healthy + err = errors.Join(err, fmt.Errorf("required component %s is not healthy", component)) + } + } else if ok { + // should not be present + err = errors.Join(err, fmt.Errorf("component %s should not be present", component)) + } + } + return err + } + for { + err := checkStatus() + if err == nil { + return nil + } else if errors.Is(err, stopCheck) { + return err + } + if ctx.Err() != nil { + // timeout waiting for agent to become healthy + return errors.Join(err, errors.New("timeout waiting for agent to become healthy")) + } + time.Sleep(100 * time.Millisecond) + } +} + +// k8sGetAgentID returns the agent ID for the given agent pod +func k8sGetAgentID(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer, + namespace string, agentPodName string, containerName string, +) (string, error) { + command := []string{"elastic-agent", "status", "--output=json"} + + status := atesting.AgentStatusOutput{} // clear status output + stdout.Reset() + stderr.Reset() + ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) + err := client.Resources().ExecInPod(ctx, namespace, agentPodName, containerName, command, stdout, stderr) + cancel() + if err != nil { + return "", err + } + + if err := json.Unmarshal(stdout.Bytes(), &status); err != nil { + return "", err + } + + return status.Info.ID, nil +} + +// getAgentComponentState returns the component state for the given component name and a bool indicating if it exists. 
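+// A component that is missing from the status output is reported as (-1, false).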
+func getAgentComponentState(status atesting.AgentStatusOutput, componentName string) (int, bool) { + for _, comp := range status.Components { + if comp.Name == componentName { + return comp.State, true + } + } + return -1, false +} + +// k8sKustomizeAdjustObjects adjusts the namespace of the given k8s objects and calls the given callbacks for the pod spec and the matching container +func k8sKustomizeAdjustObjects(objects []k8s.Object, namespace string, containerName string, cbContainer func(container *corev1.Container), cbPod func(pod *corev1.PodSpec)) { + // Set the namespace on each object; callers typically use the callbacks to point the agent container at the image already loaded in the kind cluster and to adjust its pull policy + for _, obj := range objects { + obj.SetNamespace(namespace) + var podSpec *corev1.PodSpec + switch objWithType := obj.(type) { + case *appsv1.DaemonSet: + podSpec = &objWithType.Spec.Template.Spec + case *appsv1.StatefulSet: + podSpec = &objWithType.Spec.Template.Spec + case *appsv1.Deployment: + podSpec = &objWithType.Spec.Template.Spec + case *appsv1.ReplicaSet: + podSpec = &objWithType.Spec.Template.Spec + case *batchv1.Job: + podSpec = &objWithType.Spec.Template.Spec + case *batchv1.CronJob: + podSpec = &objWithType.Spec.JobTemplate.Spec.Template.Spec + default: + continue + } + + if cbPod != nil { + cbPod(podSpec) + } + + for idx, container := range podSpec.Containers { + if container.Name != containerName { + continue + } + if cbContainer != nil { + cbContainer(&podSpec.Containers[idx]) + } + } + } +} + +// k8sRenderKustomize renders the given kustomize directory to YAML +func k8sRenderKustomize(kustomizePath string) ([]byte, error) { + // Create a file system pointing to the kustomize directory + fSys := filesys.MakeFsOnDisk() + + // Create a kustomizer + k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) + + // Run the kustomizer on the given directory + resMap, err := k.Run(fSys, kustomizePath) + if err != nil { + return nil, err + } + + // Convert the result to YAML + renderedManifest, err := resMap.AsYaml() + if err != nil { + return nil, err + } + + return renderedManifest, nil +} + +// k8sDeleteOpts contains options for deleting k8s objects +type k8sDeleteOpts struct { + // wait for the objects to be deleted + wait bool + // timeout for waiting for the objects to be deleted + waitTimeout time.Duration +} + +// k8sDeleteObjects deletes the given k8s objects and waits for them to be deleted if wait is true.
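+// Individual delete errors are ignored, as an object that is already gone counts as deleted; when wait is set, each object is polled with Get until it disappears or waitTimeout (default 20 seconds) expires.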
+func k8sDeleteObjects(ctx context.Context, client klient.Client, opts k8sDeleteOpts, objects ...k8s.Object) error { + if len(objects) == 0 { + return nil + } + + // Delete the objects + for _, obj := range objects { + _ = client.Resources(obj.GetNamespace()).Delete(ctx, obj) + } + + if !opts.wait { + // no need to wait + return nil + } + + if opts.waitTimeout == 0 { + // default to 20 seconds + opts.waitTimeout = 20 * time.Second + } + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, opts.waitTimeout) + defer timeoutCancel() + for _, obj := range objects { + for { + if timeoutCtx.Err() != nil { + return errors.New("timeout waiting for k8s objects to be deleted") + } + + err := client.Resources().Get(timeoutCtx, obj.GetName(), obj.GetNamespace(), obj) + if err != nil { + // object has been deleted + break + } + + time.Sleep(100 * time.Millisecond) + } + } + + return nil +} + +// k8sCreateOpts contains options for k8sCreateObjects +type k8sCreateOpts struct { + // namespace is the namespace to create the objects in + namespace string + // wait specifies whether to wait for the objects to be ready + wait bool + // waitTimeout is the timeout for waiting for the objects to be ready if wait is true + waitTimeout time.Duration +} + +// k8sCreateObjects creates k8s objects and waits for them to be ready if specified in opts. +// Note that if opts.namespace is not empty, all objects will be created and updated to reference +// the given namespace. +func k8sCreateObjects(ctx context.Context, client klient.Client, opts k8sCreateOpts, objects ...k8s.Object) error { + // Create the objects + for _, obj := range objects { + if opts.namespace != "" { + // update the namespace + obj.SetNamespace(opts.namespace) + + // special case for ClusterRoleBinding and RoleBinding + // update the subjects to reference the given namespace + switch objWithType := obj.(type) { + case *rbacv1.ClusterRoleBinding: + for idx := range objWithType.Subjects { + objWithType.Subjects[idx].Namespace = opts.namespace + } + case *rbacv1.RoleBinding: + for idx := range objWithType.Subjects { + objWithType.Subjects[idx].Namespace = opts.namespace + } + } + } + if err := client.Resources().Create(ctx, obj); err != nil { + return fmt.Errorf("failed to create object %s: %w", obj.GetName(), err) + } + } + + if !opts.wait { + // no need to wait + return nil + } + + if opts.waitTimeout == 0 { + // default to 120 seconds + opts.waitTimeout = 120 * time.Second + } + + return k8sWaitForReady(ctx, client, opts.waitTimeout, objects...) +} + +// k8sWaitForReady waits for the given k8s objects to be ready +func k8sWaitForReady(ctx context.Context, client klient.Client, waitDuration time.Duration, objects ...k8s.Object) error { + // use ready checker from helm kube + clientSet, err := kubernetes.NewForConfig(client.RESTConfig()) + if err != nil { + return fmt.Errorf("error creating clientset: %w", err) + } + readyChecker := helmKube.NewReadyChecker(clientSet, func(s string, i ...interface{}) {}) + + ctxTimeout, cancel := context.WithTimeout(ctx, waitDuration) + defer cancel() + + waitFn := func(ri *cliResource.Info) error { + // here we wait for the k8s object (e.g. 
deployment, daemonset, pod) to be ready + for { + ready, readyErr := readyChecker.IsReady(ctxTimeout, ri) + if ready { + // k8s object is ready + return nil + } + // k8s object is not ready yet + readyErr = errors.Join(fmt.Errorf("k8s object %s is not ready", ri.Name), readyErr) + + if ctxTimeout.Err() != nil { + // timeout + return errors.Join(fmt.Errorf("timeout waiting for k8s object %s to be ready", ri.Name), readyErr) + } + time.Sleep(100 * time.Millisecond) + } + } + + for _, o := range objects { + // convert k8s.Object to resource.Info for ready checker + runtimeObj, ok := o.(runtime.Object) + if !ok { + return fmt.Errorf("unable to convert k8s.Object %s to runtime.Object", o.GetName()) + } + + if err := waitFn(&cliResource.Info{ + Object: runtimeObj, + Name: o.GetName(), + Namespace: o.GetNamespace(), + }); err != nil { + return err + } + // extract pod label selector for all k8s objects that have underlying pods + oPodsLabelSelector, err := helmKube.SelectorsForObject(runtimeObj) + if err != nil { + // k8s object does not have pods + continue + } + + podList, err := clientSet.CoreV1().Pods(o.GetNamespace()).List(ctx, metav1.ListOptions{ + LabelSelector: oPodsLabelSelector.String(), + }) + if err != nil { + return fmt.Errorf("error listing pods: %w", err) + } + + // here we wait for all the pods to be ready + for _, pod := range podList.Items { + if err := waitFn(&cliResource.Info{ + Object: &pod, + Name: pod.Name, + Namespace: pod.Namespace, + }); err != nil { + return err + } + } + } + + return nil +} + +// k8sSchedulableNodeCount returns the number of schedulable nodes in the cluster, skipping nodes that are marked unschedulable or carry a NoSchedule taint +func k8sSchedulableNodeCount(ctx context.Context, kCtx k8sContext) (int, error) { + nodeList := corev1.NodeList{} + err := kCtx.client.Resources().List(ctx, &nodeList) + if err != nil { + return 0, err + } + + totalSchedulableNodes := 0 + + for _, node := range nodeList.Items { + if node.Spec.Unschedulable { + continue + } + + hasNoScheduleTaint := false + for _, taint := range node.Spec.Taints { + if taint.Effect == corev1.TaintEffectNoSchedule { + hasNoScheduleTaint = true + break + } + } + + if hasNoScheduleTaint { + continue + } + + totalSchedulableNodes++ + } + + return totalSchedulableNodes, err +} + +// GetAgentResponse extends kibana.GetAgentResponse and includes the EnrolledAt field +type GetAgentResponse struct { + kibana.GetAgentResponse `json:",inline"` + EnrolledAt time.Time `json:"enrolled_at"` +} + +// kibanaGetAgent essentially re-implements kibana.GetAgent to also extract GetAgentResponse.EnrolledAt +func kibanaGetAgent(ctx context.Context, kc *kibana.Client, id string) (*GetAgentResponse, error) { + apiURL := fmt.Sprintf("/api/fleet/agents/%s", id) + r, err := kc.Connection.SendWithContext(ctx, http.MethodGet, apiURL, nil, nil, nil) + if err != nil { + return nil, fmt.Errorf("error calling get agent API: %w", err) + } + defer r.Body.Close() + var agentResp struct { + Item GetAgentResponse `json:"item"` + } + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, fmt.Errorf("reading response body: %w", err) + } + if r.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error calling get agent API: %s", string(b)) + } + err = json.Unmarshal(b, &agentResp) + if err != nil { + return nil, fmt.Errorf("unmarshalling response json: %w", err) + } + return &agentResp.Item, nil +} + +// queryK8sNamespaceDataStream returns an ES query matching documents of the given data stream type, dataset and namespace that originate from the given k8s namespace +func queryK8sNamespaceDataStream(dsType, dataset, datastreamNamespace, k8snamespace string) map[string]any { + return map[string]any{ + "_source": []string{"message"}, + "query": map[string]any{ + "bool": map[string]any{ + "filter": []any{ + map[string]any{ + "term": map[string]any{
+ "data_stream.dataset": dataset, + }, + }, + map[string]any{ + "term": map[string]any{ + "data_stream.namespace": datastreamNamespace, + }, + }, + map[string]any{ + "term": map[string]any{ + "data_stream.type": dsType, + }, + }, + map[string]any{ + "term": map[string]any{ + "resource.attributes.k8s.namespace.name": k8snamespace, + }, + }, + }, + }, + }, + } +} diff --git a/testing/integration/journald_test.go b/testing/integration/k8s/journald_test.go similarity index 96% rename from testing/integration/journald_test.go rename to testing/integration/k8s/journald_test.go index 13f6ac0c8c7..86485f2e48f 100644 --- a/testing/integration/journald_test.go +++ b/testing/integration/k8s/journald_test.go @@ -4,7 +4,7 @@ //go:build integration -package integration +package k8s import ( "context" @@ -19,6 +19,7 @@ import ( "github.com/elastic/elastic-agent-libs/testing/estools" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/testing/integration" "github.com/elastic/go-elasticsearch/v8" ) @@ -136,7 +137,7 @@ func journaldTest( } // Query the index and filter by the input type - docs := findESDocs(t, func() (estools.Documents, error) { + docs := integration.FindESDocs(t, func() (estools.Documents, error) { return estools.GetLogsForIndexWithContext( ctx, esClient, diff --git a/testing/integration/kubernetes_agent_service_test.go b/testing/integration/k8s/kubernetes_agent_service_test.go similarity index 98% rename from testing/integration/kubernetes_agent_service_test.go rename to testing/integration/k8s/kubernetes_agent_service_test.go index 6c2049c5106..d14f42b8750 100644 --- a/testing/integration/kubernetes_agent_service_test.go +++ b/testing/integration/k8s/kubernetes_agent_service_test.go @@ -4,7 +4,7 @@ //go:build integration -package integration +package k8s import ( "context" diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/k8s/kubernetes_agent_standalone_test.go similarity index 74% rename from testing/integration/kubernetes_agent_standalone_test.go rename to testing/integration/k8s/kubernetes_agent_standalone_test.go index c7f5a9ac531..1769a2010f2 100644 --- a/testing/integration/kubernetes_agent_standalone_test.go +++ b/testing/integration/k8s/kubernetes_agent_standalone_test.go @@ -4,57 +4,38 @@ //go:build integration -package integration +package k8s import ( "archive/tar" "bufio" "bytes" "context" - "crypto/sha256" - "encoding/base64" "encoding/json" - "errors" "fmt" "io" - "net/http" "os" "path/filepath" - "regexp" - "strings" "testing" "time" - "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/kibana" - "github.com/elastic/elastic-agent-libs/testing/estools" - "github.com/elastic/go-elasticsearch/v8" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - cliResource "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" "sigs.k8s.io/e2e-framework/klient" "sigs.k8s.io/e2e-framework/klient/k8s" - "sigs.k8s.io/kustomize/api/krusty" - "sigs.k8s.io/kustomize/kyaml/filesys" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/cli" - helmKube "helm.sh/helm/v3/pkg/kube" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - aclient 
"github.com/elastic/elastic-agent/pkg/control/v2/client" - atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" "github.com/elastic/elastic-agent/pkg/testing/helm" testK8s "github.com/elastic/elastic-agent/pkg/testing/kubernetes" @@ -62,12 +43,10 @@ import ( ) const ( - agentK8SKustomize = "../../deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone" - agentK8SHelm = "../../deploy/helm/elastic-agent" + agentK8SKustomize = "../../../deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone" + agentK8SHelm = "../../../deploy/helm/elastic-agent" ) -var noSpecialCharsRegexp = regexp.MustCompile("[^a-zA-Z0-9]+") - func TestKubernetesAgentStandaloneKustomize(t *testing.T) { info := define.Require(t, define.Requirements{ Stack: &define.Stack{}, @@ -789,111 +768,6 @@ func TestKubernetesAgentHelm(t *testing.T) { } } -// k8sCheckAgentStatus checks that the agent reports healthy. -func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer, - namespace string, agentPodName string, containerName string, componentPresence map[string]bool, -) error { - command := []string{"elastic-agent", "status", "--output=json"} - stopCheck := errors.New("stop check") - - // we will wait maximum 120 seconds for the agent to report healthy - ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) - defer cancel() - - checkStatus := func() error { - pod := corev1.Pod{} - if err := client.Resources(namespace).Get(ctx, agentPodName, namespace, &pod); err != nil { - return err - } - - for _, container := range pod.Status.ContainerStatuses { - if container.Name != containerName { - continue - } - - if restarts := container.RestartCount; restarts != 0 { - return fmt.Errorf("container %q of pod %q has restarted %d times: %w", containerName, agentPodName, restarts, stopCheck) - } - } - - status := atesting.AgentStatusOutput{} // clear status output - stdout.Reset() - stderr.Reset() - if err := client.Resources().ExecInPod(ctx, namespace, agentPodName, containerName, command, stdout, stderr); err != nil { - return err - } - - if err := json.Unmarshal(stdout.Bytes(), &status); err != nil { - return err - } - - var err error - // validate that the components defined are also healthy if they should exist - for component, shouldBePresent := range componentPresence { - compState, ok := getAgentComponentState(status, component) - if shouldBePresent { - if !ok { - // doesn't exist - err = errors.Join(err, fmt.Errorf("required component %s not found", component)) - } else if compState != int(aclient.Healthy) { - // not healthy - err = errors.Join(err, fmt.Errorf("required component %s is not healthy", component)) - } - } else if ok { - // should not be present - err = errors.Join(err, fmt.Errorf("component %s should not be present", component)) - } - } - return err - } - for { - err := checkStatus() - if err == nil { - return nil - } else if errors.Is(err, stopCheck) { - return err - } - if ctx.Err() != nil { - // timeout waiting for agent to become healthy - return errors.Join(err, errors.New("timeout waiting for agent to become healthy")) - } - time.Sleep(100 * time.Millisecond) - } -} - -// k8sGetAgentID returns the agent ID for the given agent pod -func k8sGetAgentID(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer, - namespace string, agentPodName string, containerName string, -) (string, error) { - command := []string{"elastic-agent", "status", 
"--output=json"} - - status := atesting.AgentStatusOutput{} // clear status output - stdout.Reset() - stderr.Reset() - ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) - err := client.Resources().ExecInPod(ctx, namespace, agentPodName, containerName, command, stdout, stderr) - cancel() - if err != nil { - return "", err - } - - if err := json.Unmarshal(stdout.Bytes(), &status); err != nil { - return "", err - } - - return status.Info.ID, nil -} - -// getAgentComponentState returns the component state for the given component name and a bool indicating if it exists. -func getAgentComponentState(status atesting.AgentStatusOutput, componentName string) (int, bool) { - for _, comp := range status.Components { - if comp.Name == componentName { - return comp.State, true - } - } - return -1, false -} - // k8sDumpPods creates an archive that contains logs of all pods in the given namespace and kube-system to the given target directory func k8sDumpPods(t *testing.T, ctx context.Context, client klient.Client, testName string, namespace string, targetDir string, testStartTime time.Time) { // Create the tar file @@ -1040,376 +914,6 @@ func k8sDumpPods(t *testing.T, ctx context.Context, client klient.Client, testNa } } -// k8sKustomizeAdjustObjects adjusts the namespace of given k8s objects and calls the given callbacks for the containers and the pod -func k8sKustomizeAdjustObjects(objects []k8s.Object, namespace string, containerName string, cbContainer func(container *corev1.Container), cbPod func(pod *corev1.PodSpec)) { - // Update the agent image and image pull policy as it is already loaded in kind cluster - for _, obj := range objects { - obj.SetNamespace(namespace) - var podSpec *corev1.PodSpec - switch objWithType := obj.(type) { - case *appsv1.DaemonSet: - podSpec = &objWithType.Spec.Template.Spec - case *appsv1.StatefulSet: - podSpec = &objWithType.Spec.Template.Spec - case *appsv1.Deployment: - podSpec = &objWithType.Spec.Template.Spec - case *appsv1.ReplicaSet: - podSpec = &objWithType.Spec.Template.Spec - case *batchv1.Job: - podSpec = &objWithType.Spec.Template.Spec - case *batchv1.CronJob: - podSpec = &objWithType.Spec.JobTemplate.Spec.Template.Spec - default: - continue - } - - if cbPod != nil { - cbPod(podSpec) - } - - for idx, container := range podSpec.Containers { - if container.Name != containerName { - continue - } - if cbContainer != nil { - cbContainer(&podSpec.Containers[idx]) - } - } - } -} - -// k8sRenderKustomize renders the given kustomize directory to YAML -func k8sRenderKustomize(kustomizePath string) ([]byte, error) { - // Create a file system pointing to the kustomize directory - fSys := filesys.MakeFsOnDisk() - - // Create a kustomizer - k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) - - // Run the kustomizer on the given directory - resMap, err := k.Run(fSys, kustomizePath) - if err != nil { - return nil, err - } - - // Convert the result to YAML - renderedManifest, err := resMap.AsYaml() - if err != nil { - return nil, err - } - - return renderedManifest, nil -} - -// generateESAPIKey generates an API key for the given Elasticsearch. 
-func generateESAPIKey(esClient *elasticsearch.Client, keyName string) (estools.APIKeyResponse, error) { - return estools.CreateAPIKey(context.Background(), esClient, estools.APIKeyRequest{Name: keyName, Expiration: "1d"}) -} - -// k8sDeleteOpts contains options for deleting k8s objects -type k8sDeleteOpts struct { - // wait for the objects to be deleted - wait bool - // timeout for waiting for the objects to be deleted - waitTimeout time.Duration -} - -// k8sDeleteObjects deletes the given k8s objects and waits for them to be deleted if wait is true. -func k8sDeleteObjects(ctx context.Context, client klient.Client, opts k8sDeleteOpts, objects ...k8s.Object) error { - if len(objects) == 0 { - return nil - } - - // Delete the objects - for _, obj := range objects { - _ = client.Resources(obj.GetNamespace()).Delete(ctx, obj) - } - - if !opts.wait { - // no need to wait - return nil - } - - if opts.waitTimeout == 0 { - // default to 20 seconds - opts.waitTimeout = 20 * time.Second - } - - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, opts.waitTimeout) - defer timeoutCancel() - for _, obj := range objects { - for { - if timeoutCtx.Err() != nil { - return errors.New("timeout waiting for k8s objects to be deleted") - } - - err := client.Resources().Get(timeoutCtx, obj.GetName(), obj.GetNamespace(), obj) - if err != nil { - // object has been deleted - break - } - - time.Sleep(100 * time.Millisecond) - } - } - - return nil -} - -// int64Ptr returns a pointer to the given int64 -func int64Ptr(val int64) *int64 { - valPtr := val - return &valPtr -} - -// k8sCreateOpts contains options for k8sCreateObjects -type k8sCreateOpts struct { - // namespace is the namespace to create the objects in - namespace string - // wait specifies whether to wait for the objects to be ready - wait bool - // waitTimeout is the timeout for waiting for the objects to be ready if wait is true - waitTimeout time.Duration -} - -// k8sCreateObjects creates k8s objects and waits for them to be ready if specified in opts. -// Note that if opts.namespace is not empty, all objects will be created and updated to reference -// the given namespace. -func k8sCreateObjects(ctx context.Context, client klient.Client, opts k8sCreateOpts, objects ...k8s.Object) error { - // Create the objects - for _, obj := range objects { - if opts.namespace != "" { - // update the namespace - obj.SetNamespace(opts.namespace) - - // special case for ClusterRoleBinding and RoleBinding - // update the subjects to reference the given namespace - switch objWithType := obj.(type) { - case *rbacv1.ClusterRoleBinding: - for idx := range objWithType.Subjects { - objWithType.Subjects[idx].Namespace = opts.namespace - } - case *rbacv1.RoleBinding: - for idx := range objWithType.Subjects { - objWithType.Subjects[idx].Namespace = opts.namespace - } - } - } - if err := client.Resources().Create(ctx, obj); err != nil { - return fmt.Errorf("failed to create object %s: %w", obj.GetName(), err) - } - } - - if !opts.wait { - // no need to wait - return nil - } - - if opts.waitTimeout == 0 { - // default to 120 seconds - opts.waitTimeout = 120 * time.Second - } - - return k8sWaitForReady(ctx, client, opts.waitTimeout, objects...) 
-} - -// k8sWaitForReady waits for the given k8s objects to be ready -func k8sWaitForReady(ctx context.Context, client klient.Client, waitDuration time.Duration, objects ...k8s.Object) error { - // use ready checker from helm kube - clientSet, err := kubernetes.NewForConfig(client.RESTConfig()) - if err != nil { - return fmt.Errorf("error creating clientset: %w", err) - } - readyChecker := helmKube.NewReadyChecker(clientSet, func(s string, i ...interface{}) {}) - - ctxTimeout, cancel := context.WithTimeout(ctx, waitDuration) - defer cancel() - - waitFn := func(ri *cliResource.Info) error { - // here we wait for the k8s object (e.g. deployment, daemonset, pod) to be ready - for { - ready, readyErr := readyChecker.IsReady(ctxTimeout, ri) - if ready { - // k8s object is ready - return nil - } - // k8s object is not ready yet - readyErr = errors.Join(fmt.Errorf("k8s object %s is not ready", ri.Name), readyErr) - - if ctxTimeout.Err() != nil { - // timeout - return errors.Join(fmt.Errorf("timeout waiting for k8s object %s to be ready", ri.Name), readyErr) - } - time.Sleep(100 * time.Millisecond) - } - } - - for _, o := range objects { - // convert k8s.Object to resource.Info for ready checker - runtimeObj, ok := o.(runtime.Object) - if !ok { - return fmt.Errorf("unable to convert k8s.Object %s to runtime.Object", o.GetName()) - } - - if err := waitFn(&cliResource.Info{ - Object: runtimeObj, - Name: o.GetName(), - Namespace: o.GetNamespace(), - }); err != nil { - return err - } - // extract pod label selector for all k8s objects that have underlying pods - oPodsLabelSelector, err := helmKube.SelectorsForObject(runtimeObj) - if err != nil { - // k8s object does not have pods - continue - } - - podList, err := clientSet.CoreV1().Pods(o.GetNamespace()).List(ctx, metav1.ListOptions{ - LabelSelector: oPodsLabelSelector.String(), - }) - if err != nil { - return fmt.Errorf("error listing pods: %w", err) - } - - // here we wait for the all pods to be ready - for _, pod := range podList.Items { - if err := waitFn(&cliResource.Info{ - Object: &pod, - Name: pod.Name, - Namespace: pod.Namespace, - }); err != nil { - return err - } - } - } - - return nil -} - -// k8sContext contains all the information needed to run a k8s test -type k8sContext struct { - client klient.Client - clientSet *kubernetes.Clientset - // logsBasePath is the path that will be used to store the pod logs in a case a test fails - logsBasePath string - // agentImage is the full image of elastic-agent to use in the test - agentImage string - // agentImageRepo is the repository of elastic-agent image to use in the test - agentImageRepo string - // agentImageTag is the tag of elastic-agent image to use in the test - agentImageTag string - // esHost is the host of the elasticsearch to use in the test - esHost string - // esAPIKey is the API key of the elasticsearch to use in the test - esAPIKey string - // esEncodedAPIKey is the encoded API key of the elasticsearch to use in the test - esEncodedAPIKey string - // enrollParams contains the information needed to enroll an agent with Fleet in the test - enrollParams *fleettools.EnrollParams - // createdAt is the time when the k8sContext was created - createdAt time.Time -} - -// getNamespace returns a unique namespace on every call. -// If K8S_TESTS_NAMESPACE is set, then its value is returned, -// otherwise a unique namespace is generated. 
-func (k k8sContext) getNamespace(t *testing.T) string { - if ns := os.Getenv("K8S_TESTS_NAMESPACE"); ns != "" { - return ns - } - - nsUUID, err := uuid.NewV4() - if err != nil { - t.Fatalf("error generating namespace UUID: %v", err) - } - hasher := sha256.New() - hasher.Write([]byte(nsUUID.String())) - testNamespace := strings.ToLower(base64.URLEncoding.EncodeToString(hasher.Sum(nil))) - return noSpecialCharsRegexp.ReplaceAllString(testNamespace, "") -} - -func k8sSchedulableNodeCount(ctx context.Context, kCtx k8sContext) (int, error) { - nodeList := corev1.NodeList{} - err := kCtx.client.Resources().List(ctx, &nodeList) - if err != nil { - return 0, err - } - - totalSchedulableNodes := 0 - - for _, node := range nodeList.Items { - if node.Spec.Unschedulable { - continue - } - - hasNoScheduleTaint := false - for _, taint := range node.Spec.Taints { - if taint.Effect == corev1.TaintEffectNoSchedule { - hasNoScheduleTaint = true - break - } - } - - if hasNoScheduleTaint { - continue - } - - totalSchedulableNodes++ - } - - return totalSchedulableNodes, err -} - -// k8sGetContext performs all the necessary checks to get a k8sContext for the current test -func k8sGetContext(t *testing.T, info *define.Info) k8sContext { - agentImage := os.Getenv("AGENT_IMAGE") - require.NotEmpty(t, agentImage, "AGENT_IMAGE must be set") - - agentImageParts := strings.SplitN(agentImage, ":", 2) - require.Len(t, agentImageParts, 2, "AGENT_IMAGE must be in the form ':'") - agentImageRepo := agentImageParts[0] - agentImageTag := agentImageParts[1] - - client, err := info.KubeClient() - require.NoError(t, err) - require.NotNil(t, client) - - clientSet, err := kubernetes.NewForConfig(client.RESTConfig()) - require.NoError(t, err) - require.NotNil(t, clientSet) - - testLogsBasePath := os.Getenv("K8S_TESTS_POD_LOGS_BASE") - require.NotEmpty(t, testLogsBasePath, "K8S_TESTS_POD_LOGS_BASE must be set") - - err = os.MkdirAll(testLogsBasePath, 0o755) - require.NoError(t, err, "failed to create test logs directory") - - esHost, err := getESHost() - require.NoError(t, err, "cannot parse ELASTICSEARCH_HOST") - - esAPIKey, err := generateESAPIKey(info.ESClient, info.Namespace) - require.NoError(t, err, "failed to generate ES API key") - require.NotEmpty(t, esAPIKey, "failed to generate ES API key") - - enrollParams, err := fleettools.NewEnrollParams(context.Background(), info.KibanaClient) - require.NoError(t, err, "failed to create fleet enroll params") - - return k8sContext{ - client: client, - clientSet: clientSet, - agentImage: agentImage, - agentImageRepo: agentImageRepo, - agentImageTag: agentImageTag, - logsBasePath: testLogsBasePath, - esHost: esHost, - esAPIKey: esAPIKey.APIKey, - esEncodedAPIKey: esAPIKey.Encoded, - enrollParams: enrollParams, - createdAt: time.Now(), - } -} - // k8sTestStep is a function that performs a single step in a k8s integration test type k8sTestStep func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) @@ -1784,34 +1288,3 @@ func k8sStepCheckRestrictUpgrade(agentPodLabelSelector string, expectedPodNumber } } } - -// GetAgentResponse extends kibana.GetAgentResponse and includes the EnrolledAt field -type GetAgentResponse struct { - kibana.GetAgentResponse `json:",inline"` - EnrolledAt time.Time `json:"enrolled_at"` -} - -// kibanaGetAgent essentially re-implements kibana.GetAgent to extract also GetAgentResponse.EnrolledAt -func kibanaGetAgent(ctx context.Context, kc *kibana.Client, id string) (*GetAgentResponse, error) { - apiURL := fmt.Sprintf("/api/fleet/agents/%s", id) 
- r, err := kc.Connection.SendWithContext(ctx, http.MethodGet, apiURL, nil, nil, nil) - if err != nil { - return nil, fmt.Errorf("error calling get agent API: %w", err) - } - defer r.Body.Close() - var agentResp struct { - Item GetAgentResponse `json:"item"` - } - b, err := io.ReadAll(r.Body) - if err != nil { - return nil, fmt.Errorf("reading response body: %w", err) - } - if r.StatusCode != http.StatusOK { - return nil, fmt.Errorf("error calling get agent API: %s", string(b)) - } - err = json.Unmarshal(b, &agentResp) - if err != nil { - return nil, fmt.Errorf("unmarshalling response json: %w", err) - } - return &agentResp.Item, nil -} diff --git a/testing/integration/k8s/main_test.go b/testing/integration/k8s/main_test.go new file mode 100644 index 00000000000..9fc1e04fd71 --- /dev/null +++ b/testing/integration/k8s/main_test.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build integration + +package k8s + +import ( + "flag" + "log" + "os" + "testing" + + "github.com/elastic/elastic-agent/pkg/testing/define" +) + +var flagSet = flag.CommandLine + +func init() { + define.RegisterFlags("integration.", flagSet) +} + +func TestMain(m *testing.M) { + define.SetKubernetesSupported() + flag.Parse() + + if define.AutoDiscover { + define.InitAutodiscovery(nil) + } + + runExitCode := m.Run() + + if define.AutoDiscover { + discoveredTests, err := define.DumpAutodiscoveryYAML() + if err != nil { + log.Fatalf("Error dumping autodiscovery YAML: %v\n", err) + } + + err = os.WriteFile(define.AutoDiscoveryOutput, discoveredTests, 0644) + if err != nil { + log.Fatalf("Error writing autodiscovery data in %q: %v\n", define.AutoDiscoveryOutput, err) + } + } + + os.Exit(runExitCode) +} diff --git a/testing/integration/otel_helm_test.go b/testing/integration/k8s/otel_helm_test.go similarity index 90% rename from testing/integration/otel_helm_test.go rename to testing/integration/k8s/otel_helm_test.go index 9c75ddab57a..57dd740e465 100644 --- a/testing/integration/otel_helm_test.go +++ b/testing/integration/k8s/otel_helm_test.go @@ -4,7 +4,7 @@ //go:build integration -package integration +package k8s import ( "bufio" @@ -72,7 +72,7 @@ func TestOtelKubeStackHelm(t *testing.T) { k8sStepCreateNamespace(), k8sStepHelmDeployWithValueOptions(chartLocation, "kube-stack-otel", values.Options{ - ValueFiles: []string{"../../deploy/helm/edot-collector/kube-stack/values.yaml"}, + ValueFiles: []string{"../../../deploy/helm/edot-collector/kube-stack/values.yaml"}, Values: []string{ fmt.Sprintf("defaultCRConfig.image.repository=%s", kCtx.agentImageRepo), fmt.Sprintf("defaultCRConfig.image.tag=%s", kCtx.agentImageTag), @@ -115,7 +115,7 @@ func TestOtelKubeStackHelm(t *testing.T) { k8sStepCreateNamespace(), k8sStepHelmDeployWithValueOptions(chartLocation, "kube-stack-otel", values.Options{ - ValueFiles: []string{"../../deploy/helm/edot-collector/kube-stack/managed_otlp/values.yaml"}, + ValueFiles: []string{"../../../deploy/helm/edot-collector/kube-stack/managed_otlp/values.yaml"}, Values: []string{fmt.Sprintf("defaultCRConfig.image.repository=%s", kCtx.agentImageRepo), fmt.Sprintf("defaultCRConfig.image.tag=%s", kCtx.agentImageTag)}, // override secrets reference with env variables @@ -225,35 +225,3 @@ func k8sStepCheckDatastreamsHits(info *define.Info, dsType, dataset, datastreamN }, 5*time.Minute, 
 			10*time.Second, fmt.Sprintf("at least one document should be available for %s datastream", fmt.Sprintf("%s-%s-%s", dsType, dataset, datastreamNamespace)))
 	}
 }
-
-func queryK8sNamespaceDataStream(dsType, dataset, datastreamNamespace, k8snamespace string) map[string]any {
-	return map[string]any{
-		"_source": []string{"message"},
-		"query": map[string]any{
-			"bool": map[string]any{
-				"filter": []any{
-					map[string]any{
-						"term": map[string]any{
-							"data_stream.dataset": dataset,
-						},
-					},
-					map[string]any{
-						"term": map[string]any{
-							"data_stream.namespace": datastreamNamespace,
-						},
-					},
-					map[string]any{
-						"term": map[string]any{
-							"data_stream.type": dsType,
-						},
-					},
-					map[string]any{
-						"term": map[string]any{
-							"resource.attributes.k8s.namespace.name": k8snamespace,
-						},
-					},
-				},
-			},
-		},
-	}
-}
diff --git a/testing/integration/testdata/connectors.agent.yml b/testing/integration/k8s/testdata/connectors.agent.yml
similarity index 100%
rename from testing/integration/testdata/connectors.agent.yml
rename to testing/integration/k8s/testdata/connectors.agent.yml
diff --git a/testing/integration/testdata/java_app.yaml b/testing/integration/k8s/testdata/java_app.yaml
similarity index 100%
rename from testing/integration/testdata/java_app.yaml
rename to testing/integration/k8s/testdata/java_app.yaml
diff --git a/testing/integration/testdata/journald-input.yml b/testing/integration/k8s/testdata/journald-input.yml
similarity index 100%
rename from testing/integration/testdata/journald-input.yml
rename to testing/integration/k8s/testdata/journald-input.yml
diff --git a/testing/integration/testdata/k8s.hints.redis.yaml b/testing/integration/k8s/testdata/k8s.hints.redis.yaml
similarity index 100%
rename from testing/integration/testdata/k8s.hints.redis.yaml
rename to testing/integration/k8s/testdata/k8s.hints.redis.yaml
diff --git a/testing/integration/logs_ingestion.go b/testing/integration/logs_ingestion.go
index 0810a68f904..dffc3f373e9 100644
--- a/testing/integration/logs_ingestion.go
+++ b/testing/integration/logs_ingestion.go
@@ -169,7 +169,7 @@ func testMonitoringLogsAreShipped(
 ) {
 	// Stage 1: Make sure metricbeat logs are populated
 	t.Log("Making sure metricbeat logs are populated")
-	docs := findESDocs(t, func() (estools.Documents, error) {
+	docs := FindESDocs(t, func() (estools.Documents, error) {
 		return estools.GetLogsForDataset(ctx, info.ESClient, "elastic_agent.metricbeat")
 	})
 	t.Logf("metricbeat: Got %d documents", len(docs.Hits.Hits))
@@ -230,7 +230,7 @@ func testMonitoringLogsAreShipped(
 
 	// Stage 3: Make sure we have message confirming central management is running
 	t.Log("Making sure we have message confirming central management is running")
-	docs = findESDocs(t, func() (estools.Documents, error) {
+	docs = FindESDocs(t, func() (estools.Documents, error) {
 		return estools.FindMatchingLogLines(ctx, info.ESClient, info.Namespace,
 			"Parsed configuration and determined agent is managed by Fleet")
 	})
@@ -241,7 +241,7 @@ func testMonitoringLogsAreShipped(
 	// this field is not mapped. There is an issue for that:
 	// https://github.com/elastic/integrations/issues/6545
 	// TODO: use runtime fields while the above issue is not resolved.
-	docs = findESDocs(t, func() (estools.Documents, error) {
+	docs = FindESDocs(t, func() (estools.Documents, error) {
 		return estools.GetLogsForAgentID(ctx, info.ESClient, agentID)
 	})
 	require.NoError(t, err, "could not get logs from Agent ID: %q, err: %s",
@@ -291,28 +291,6 @@ func queryESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools
 	return docs
 }
 
-// findESDocs runs `findFn` until at least one document is returned and there is no error
-func findESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools.Documents {
-	var docs estools.Documents
-	require.Eventually(
-		t,
-		func() bool {
-			var err error
-			docs, err = findFn()
-			if err != nil {
-				t.Logf("got an error querying ES, retrying. Error: %s", err)
-				return false
-			}
-
-			return docs.Hits.Total.Value != 0
-		},
-		3*time.Minute,
-		15*time.Second,
-	)
-
-	return docs
-}
-
 func testFlattenedDatastreamFleetPolicy(
 	t *testing.T,
 	ctx context.Context,
diff --git a/testing/integration/otel_test.go b/testing/integration/otel_test.go
index 498e004bcc7..83811152901 100644
--- a/testing/integration/otel_test.go
+++ b/testing/integration/otel_test.go
@@ -11,7 +11,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"net/url"
 	"os"
 	"path/filepath"
 	"strings"
@@ -441,7 +440,7 @@ func TestOtelLogsIngestion(t *testing.T) {
 	tempDir := t.TempDir()
 	inputFilePath := filepath.Join(tempDir, "input.log")
 
-	esHost, err := getESHost()
+	esHost, err := GetESHost()
 	require.NoError(t, err, "failed to get ES host")
 	require.True(t, len(esHost) > 0)
 
@@ -560,7 +559,7 @@ func TestOtelAPMIngestion(t *testing.T) {
 	require.NoError(t, err)
 
 	// start apm default config just configure ES output
-	esHost, err := getESHost()
+	esHost, err := GetESHost()
 	require.NoError(t, err, "failed to get ES host")
 	require.True(t, len(esHost) > 0)
 
@@ -676,18 +675,6 @@ func TestOtelAPMIngestion(t *testing.T) {
 	apmFixtureWg.Wait()
 }
 
-func getESHost() (string, error) {
-	fixedESHost := os.Getenv("ELASTICSEARCH_HOST")
-	parsedES, err := url.Parse(fixedESHost)
-	if err != nil {
-		return "", err
-	}
-	if parsedES.Port() == "" {
-		fixedESHost = fmt.Sprintf("%s:443", fixedESHost)
-	}
-	return fixedESHost, nil
-}
-
 func createESApiKey(esClient *elasticsearch.Client) (estools.APIKeyResponse, error) {
 	return estools.CreateAPIKey(context.Background(), esClient, estools.APIKeyRequest{Name: "test-api-key", Expiration: "1d"})
 }
@@ -868,7 +855,7 @@ func TestOtelFBReceiverE2E(t *testing.T) {
 		Index    string
 		MinItems int
 	}
-	esEndpoint, err := getESHost()
+	esEndpoint, err := GetESHost()
 	require.NoError(t, err, "error getting elasticsearch endpoint")
 	esApiKey, err := createESApiKey(info.ESClient)
 	require.NoError(t, err, "error creating API key")
@@ -994,7 +981,7 @@ func TestOtelMBReceiverE2E(t *testing.T) {
 		Index    string
 		MinItems int
 	}
-	esEndpoint, err := getESHost()
+	esEndpoint, err := GetESHost()
 	require.NoError(t, err, "error getting elasticsearch endpoint")
 	esApiKey, err := createESApiKey(info.ESClient)
 	require.NoError(t, err, "error creating API key")