Commit b752a6f

fix: retry skopeo copy command
DockerHub has been unstable recently, which introduced new flakiness into our tests and points towards our "skopeo copy" command being a bit too fragile. This commit introduces a retry mechanism on that command, always retrying the image pull on failure. A more efficient solution would be to inspect the error causing the instability and retry only on that, but this should be good enough, as we'll waste only 2 seconds at worst.
1 parent c780d10

File tree

1 file changed (+20, −1 lines)


src/scanner/images/skopeo.ts

Lines changed: 20 additions & 1 deletion
@@ -1,3 +1,5 @@
+import * as sleep from 'sleep-promise';
+
 import * as processWrapper from '../../common/process';
 import * as config from '../../common/config';
 import * as credentials from './credentials';
@@ -41,7 +43,24 @@ export async function pull(
   args.push({body: prefixRespository(image, SkopeoRepositoryType.ImageRegistry), sanitise: false});
   args.push({body: prefixRespository(destination, SkopeoRepositoryType.DockerArchive), sanitise: false});
 
-  processWrapper.exec('skopeo', ...args);
+  await pullWithRetry(args);
+}
+
+async function pullWithRetry(args: Array<processWrapper.IProcessArgument>): Promise<void> {
+  const MAX_ATTEMPTS = 10;
+  const RETRY_INTERVAL_SEC = 0.2;
+
+  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
+    try {
+      await processWrapper.exec('skopeo', ...args);
+      return;
+    } catch (err) {
+      if (attempt + 1 > MAX_ATTEMPTS) {
+        throw err;
+      }
+      await sleep(RETRY_INTERVAL_SEC * 1000);
+    }
+  }
+}
 
 export function getCredentialParameters(credentials: string | undefined): Array<processWrapper.IProcessArgument> {
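
The commit message notes that a more efficient solution would inspect the error and retry only on the failure mode DockerHub's instability actually produces. A minimal sketch of that variant follows, assuming processWrapper.exec rejects with an Error whose message contains the registry failure text; the isTransientRegistryError helper and the patterns it matches are hypothetical and not part of this commit.

// Sketch only: retry solely on failures that look transient, instead of
// retrying unconditionally. Same imports as skopeo.ts.
import * as sleep from 'sleep-promise';
import * as processWrapper from '../../common/process';

// Hypothetical helper: classify an error as transient from its message.
// The patterns are guesses and should be replaced once the real DockerHub
// error has been inspected.
function isTransientRegistryError(err: unknown): boolean {
  const message = err instanceof Error ? err.message : String(err);
  return /timeout|connection reset|too many requests|50[23]/i.test(message);
}

async function pullWithSelectiveRetry(args: Array<processWrapper.IProcessArgument>): Promise<void> {
  const MAX_ATTEMPTS = 10;
  const RETRY_INTERVAL_SEC = 0.2;

  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      await processWrapper.exec('skopeo', ...args);
      return;
    } catch (err) {
      // Give up immediately on errors we do not recognise as transient,
      // and on the last allowed attempt.
      if (!isTransientRegistryError(err) || attempt >= MAX_ATTEMPTS) {
        throw err;
      }
      await sleep(RETRY_INTERVAL_SEC * 1000);
    }
  }
}

Whether the extra classification is worth the effort depends on how noisy the real failure turns out to be; the unconditional retry in the commit keeps the worst-case overhead to roughly MAX_ATTEMPTS × RETRY_INTERVAL_SEC, i.e. about 2 seconds.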
