@@ -16,8 +16,8 @@ import (
1616 "github.com/replicatedhq/troubleshoot/pkg/convert"
1717 "github.com/stretchr/testify/assert"
1818 "github.com/stretchr/testify/require"
19- v1 "k8s.io/api/core /v1"
20- "sigs. k8s.io/e2e-framework/klient/k8s/resources "
19+ appsv1 "k8s.io/api/apps /v1"
20+ metav1 " k8s.io/apimachinery/pkg/apis/meta/v1 "
2121 "sigs.k8s.io/e2e-framework/klient/wait"
2222 "sigs.k8s.io/e2e-framework/klient/wait/conditions"
2323 "sigs.k8s.io/e2e-framework/pkg/envconf"
@@ -48,6 +48,7 @@ func Test_GoldpingerCollector(t *testing.T) {
 	feature := features.New("Goldpinger collector and analyser").
 		Setup(func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
 			cluster := getClusterFromContext(t, ctx, ClusterName)
+
 			manager := helm.New(cluster.GetKubeconfig())
 			err := manager.RunInstall(
 				helm.WithName(releaseName),
@@ -57,22 +58,21 @@ func Test_GoldpingerCollector(t *testing.T) {
 				helm.WithTimeout("2m"),
 			)
 			require.NoError(t, err)
+
 			client, err := c.NewClient()
 			require.NoError(t, err)
-			pods := &v1.PodList{}

 			// Let's wait for the goldpinger pods to be running
-			err = client.Resources().WithNamespace(c.Namespace()).List(ctx, pods,
-				resources.WithLabelSelector("app.kubernetes.io/name=goldpinger"),
-			)
-			require.NoError(t, err)
-			require.Len(t, pods.Items, 1)
-
+			ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "goldpinger", Namespace: c.Namespace()}}
 			err = wait.For(
-				conditions.New(client.Resources()).PodRunning(&pods.Items[0]),
+				conditions.New(client.Resources()).DaemonSetReady(ds),
 				wait.WithTimeout(time.Second*30),
 			)
 			require.NoError(t, err)
+
+			// HACK: wait for goldpinger to do its thing
+			time.Sleep(time.Second * 30)
+
 			return ctx
 		}).
 		Assess("collect and analyse goldpinger pings", func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
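The Setup above still ends in a fixed 30-second sleep so that goldpinger has time to ping its peers before collection runs. Below is a minimal sketch of one way that sleep could be replaced, assuming the chart exposes a "goldpinger" service on port 8080 and that its /check_all endpoint returns a JSON object with a "responses" map; none of that is established by this change, and the helper name is hypothetical. The *rest.Config could come from the klient client created in Setup (for example via client.RESTConfig()).

import (
	"context"
	"encoding/json"
	"time"

	apiwait "k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// waitForGoldpingerResults polls goldpinger's /check_all endpoint through the
// API server's service proxy until at least one ping response is reported.
// The service name, port, and payload shape are assumptions, not chart guarantees.
func waitForGoldpingerResults(ctx context.Context, cfg *rest.Config, namespace string) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	return apiwait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute,
		func(ctx context.Context) (bool, error) {
			raw, err := clientset.CoreV1().
				Services(namespace).
				ProxyGet("http", "goldpinger", "8080", "/check_all", nil).
				DoRaw(ctx)
			if err != nil {
				// Service not routable yet; keep polling until the timeout.
				return false, nil
			}

			// Assumed payload: a top-level "responses" map keyed by pod name.
			var result struct {
				Responses map[string]json.RawMessage `json:"responses"`
			}
			if err := json.Unmarshal(raw, &result); err != nil {
				return false, nil
			}
			return len(result.Responses) > 0, nil
		})
}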
@@ -108,7 +108,7 @@ func Test_GoldpingerCollector(t *testing.T) {
 			// Check that we analysed collected goldpinger results.
 			// We should expect a single analysis result for goldpinger.
 			assert.Equal(t, 1, len(analysisResults))
-			assert.True(t, strings.HasPrefix(analysisResults[0].Name, "missing.ping.results.for.goldpinger."))
+			assert.True(t, strings.HasPrefix(analysisResults[0].Name, "pings.to.goldpinger."))
 			if t.Failed() {
 				t.Logf("Analysis results: %s\n", analysisJSON)
 				t.Logf("Stdout: %s\n", out.String())
@@ -121,7 +121,8 @@ func Test_GoldpingerCollector(t *testing.T) {
 		Teardown(func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
 			cluster := getClusterFromContext(t, ctx, ClusterName)
 			manager := helm.New(cluster.GetKubeconfig())
-			manager.RunUninstall(helm.WithName(releaseName), helm.WithNamespace(c.Namespace()))
+			err := manager.RunUninstall(helm.WithName(releaseName), helm.WithNamespace(c.Namespace()))
+			require.NoError(t, err)
 			return ctx
 		}).
 		Feature()