@@ -19,6 +19,7 @@ package integration
 import (
 	"bufio"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
@@ -60,6 +61,8 @@ func runUpgradeTestCase(
 	setupUpgradeVerifyCase func(t *testing.T, criRuntimeService cri.RuntimeService, criImageService cri.ImageManagerService) upgradeVerifyCaseFunc,
 ) func(t *testing.T) {
 	return func(t *testing.T) {
+		// NOTE: Using t.TempDir() here ensures there are no leaky
+		// mountpoints after the test completes.
 		workDir := t.TempDir()

 		t.Log("Install config for previous release")
@@ -132,7 +135,7 @@ func shouldRecoverAllThePodsAfterUpgrade(t *testing.T, rSvc cri.RuntimeService,
 	)

 	secondPodCtx := newPodTCtx(t, rSvc, "stopped-pod", "sandbox")
-	secondPodCtx.stop()
+	secondPodCtx.stop(false)

 	return func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
 		t.Log("List Pods")
@@ -164,7 +167,6 @@ func shouldRecoverAllThePodsAfterUpgrade(t *testing.T, rSvc cri.RuntimeService,
 					default:
 						t.Errorf("unexpected container %s in %s", cntr.Id, pod.Id)
 					}
-
 				}

 			case secondPodCtx.id:
@@ -259,6 +261,9 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS
 		WithCommand("sleep", "1d"))

 	return func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
+		// TODO(fuweid): make svc re-connect to new socket
+		podCtx.rSvc = rSvc
+
 		t.Log("Manipulating containers in the previous pod")

 		// For the running container, we get status and stats of it,
@@ -285,6 +290,11 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS
 		require.NoError(t, rSvc.StopContainer(cntr1, 0))
 		checkContainerState(t, rSvc, cntr1, criruntime.ContainerState_CONTAINER_EXITED)

+		cntr1DataDir := podCtx.containerDataDir(cntr1)
+		t.Logf("Container %s's data dir %s should remain until RemoveContainer", cntr1, cntr1DataDir)
+		_, err = os.Stat(cntr1DataDir)
+		require.NoError(t, err)
+
 		t.Logf("Starting created container %s", cntr2)
 		checkContainerState(t, rSvc, cntr2, criruntime.ContainerState_CONTAINER_CREATED)

@@ -297,14 +307,34 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS

 		t.Logf("Removing exited container %s", cntr3)
 		checkContainerState(t, rSvc, cntr3, criruntime.ContainerState_CONTAINER_EXITED)
+
+		cntr3DataDir := podCtx.containerDataDir(cntr3)
+		_, err = os.Stat(cntr3DataDir)
+		require.NoError(t, err)
+
 		require.NoError(t, rSvc.RemoveContainer(cntr3))

+		t.Logf("Container %s's data dir %s should be deleted after RemoveContainer", cntr3, cntr3DataDir)
+		_, err = os.Stat(cntr3DataDir)
+		require.True(t, os.IsNotExist(err))
+
 		// Create a new container in the previous pod, start, stop, and remove it
-		// TODO(fuweid): make svc re-connect to new socket
-		podCtx.rSvc = rSvc
 		podCtx.createContainer("runinpreviouspod", busyboxImage,
 			criruntime.ContainerState_CONTAINER_EXITED,
 			WithCommand("sleep", "1d"))
+
+		podCtx.stop(true)
+		podDataDir := podCtx.dataDir()
+
+		t.Logf("Pod %s's data dir %s should be deleted", podCtx.id, podDataDir)
+		_, err = os.Stat(podDataDir)
+		require.True(t, os.IsNotExist(err))
+
+		cntrDataDir := filepath.Dir(cntr3DataDir)
+		t.Logf("Containers data dir %s should be empty", cntrDataDir)
+		ents, err := os.ReadDir(cntrDataDir)
+		require.NoError(t, err)
+		require.Len(t, ents, 0, cntrDataDir)
 	}
 }

@@ -378,9 +408,51 @@ func (pCtx *podTCtx) createContainer(name, imageRef string, wantedState crirunti
 	return cnID
 }

+// containerDataDir returns container metadata dir maintained by CRI plugin.
+func (pCtx *podTCtx) containerDataDir(cntrID string) string {
+	t := pCtx.t
+
+	// check if container exists
+	status, err := pCtx.rSvc.ContainerStatus(cntrID)
+	require.NoError(t, err)
+
+	cfg := criRuntimeInfo(t, pCtx.rSvc)
+
+	rootDir := cfg["rootDir"].(string)
+	return filepath.Join(rootDir, "containers", status.Id)
+}
+
+// dataDir returns pod metadata dir maintained by CRI plugin.
+func (pCtx *podTCtx) dataDir() string {
+	t := pCtx.t
+
+	cfg := criRuntimeInfo(t, pCtx.rSvc)
+	rootDir := cfg["rootDir"].(string)
+	return filepath.Join(rootDir, "sandboxes", pCtx.id)
+}
+
 // stop stops that pod.
-func (pCtx *podTCtx) stop() {
-	require.NoError(pCtx.t, pCtx.rSvc.StopPodSandbox(pCtx.id))
+func (pCtx *podTCtx) stop(remove bool) {
+	t := pCtx.t
+
+	t.Logf("Stopping pod %s", pCtx.id)
+	require.NoError(t, pCtx.rSvc.StopPodSandbox(pCtx.id))
+	if remove {
+		t.Logf("Removing pod %s", pCtx.id)
+		require.NoError(t, pCtx.rSvc.RemovePodSandbox(pCtx.id))
+	}
+}
+
+// criRuntimeInfo dumps CRI config.
+func criRuntimeInfo(t *testing.T, svc cri.RuntimeService) map[string]interface{} {
+	resp, err := svc.Status()
+	require.NoError(t, err)
+
+	cfg := map[string]interface{}{}
+	err = json.Unmarshal([]byte(resp.GetInfo()["config"]), &cfg)
+	require.NoError(t, err)
+
+	return cfg
 }

 // checkContainerState checks container's state.
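Context for the `criRuntimeInfo` helper introduced above: a verbose CRI `Status` response carries an info map whose `"config"` entry is the CRI plugin configuration serialized as JSON, and the test unmarshals it to locate `rootDir`, the directory under which the `containers/` and `sandboxes/` metadata dirs that the new assertions check are kept. Below is a minimal, standalone sketch of just that decoding step; the `rootDirFromInfo` function and the sample path are illustrative, only the `info["config"]` layout and the `rootDir` key come from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rootDirFromInfo decodes the "config" entry of a CRI Status info map
// (a JSON document) and returns the plugin's rootDir value.
func rootDirFromInfo(info map[string]string) (string, error) {
	cfg := map[string]interface{}{}
	if err := json.Unmarshal([]byte(info["config"]), &cfg); err != nil {
		return "", err
	}
	rootDir, ok := cfg["rootDir"].(string)
	if !ok {
		return "", fmt.Errorf("rootDir missing or not a string")
	}
	return rootDir, nil
}

func main() {
	// Hypothetical info map shaped like a verbose CRI Status response.
	info := map[string]string{
		"config": `{"rootDir": "/var/lib/containerd/io.containerd.grpc.v1.cri"}`,
	}
	dir, err := rootDirFromInfo(info)
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}
```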