 from concurrent.futures import ProcessPoolExecutor
 from copy import copy
 from queue import Queue
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Tuple

 import requests
 import semver
 from opentelemetry import trace
 from packaging.version import Version

 from lib.base_logger import logger
-from scripts.evergreen.release.agent_matrix import (
-    get_supported_operator_versions,
-)
 from scripts.evergreen.release.images_signing import (
     sign_image,
     verify_signature,
 )
 from scripts.release.build.image_build_configuration import ImageBuildConfiguration
-
 from .build_images import process_image
 from .optimized_operator_build import build_operator_image_fast

 TRACER = trace.get_tracer("evergreen-agent")
-DEFAULT_NAMESPACE = "default"
-
-
-def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]:
-    if value is None:
-        return []
-
-    if isinstance(value, str):
-        return [e.strip() for e in value.split(",")]
-
-    return value


 def get_tools_distro(tools_version: str) -> Dict[str, str]:
@@ -55,11 +40,6 @@ def is_running_in_evg_pipeline():
     return os.getenv("RUNNING_IN_EVG", "") == "true"


-def is_running_in_patch():
-    is_patch = os.environ.get("is_patch")
-    return is_patch is not None and is_patch.lower() == "true"
-
-
 def load_release_file() -> Dict:
     with open("release.json") as release:
         return json.load(release)
@@ -190,14 +170,6 @@ def build_database_image(build_configuration: ImageBuildConfiguration):
     )


-def should_skip_arm64():
-    """
-    Determines if arm64 builds should be skipped based on environment.
-    Returns True if running in Evergreen pipeline as a patch.
-    """
-    return is_running_in_evg_pipeline() and is_running_in_patch()
-
-
 @TRACER.start_as_current_span("sign_image_in_repositories")
 def sign_image_in_repositories(args: Dict[str, str], arch: str = None):
     span = trace.get_current_span()
@@ -289,7 +261,6 @@ def build_image_generic(
     dockerfile_path: str,
     build_configuration: ImageBuildConfiguration,
     extra_args: dict | None = None,
-    multi_arch_args_list: list[dict] | None = None,
 ):
     """
     Build one or more platform-specific images, then (optionally)
@@ -298,24 +269,20 @@ def build_image_generic(

     registry = build_configuration.registry
     image_name = build_configuration.image_name()
-    args_list = multi_arch_args_list or [extra_args or {}]
-    version = args_list[0].get("version", "")
-    platforms = [args.get("architecture") for args in args_list]
-
-    for base_args in args_list:
-        # merge in the registry without mutating caller’s dict
-        build_args = {**base_args, "quay_registry": registry}
-        logger.debug(f"Build args: {build_args}")
-
-        # TODO: why are we iteration over platforms here? this should be multi-arch build
-        for arch in platforms:
-            logger.debug(f"Building {image_name} for arch={arch}")
-            logger.debug(f"build image generic - registry={registry}")
-            pipeline_process_image(
-                dockerfile_path=dockerfile_path,
-                build_configuration=build_configuration,
-                dockerfile_args=build_args,
-            )
+    args_list = extra_args or {}
+    version = args_list.get("version", "")
+
+    # merge in the registry without mutating caller’s dict
+    build_args = {**args_list, "quay_registry": registry}
+    logger.debug(f"Build args: {build_args}")
+
+    logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}")
+    logger.debug(f"build image generic - registry={registry}")
+    pipeline_process_image(
+        dockerfile_path=dockerfile_path,
+        build_configuration=build_configuration,
+        dockerfile_args=build_args,
+    )

     if build_configuration.sign:
         sign_image(registry, version)
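
The {**args_list, "quay_registry": registry} spread keeps the promise made in the comment: it builds a new dict and leaves the caller's extra_args untouched. A tiny standalone check of that behaviour, with placeholder values:

registry = "quay.io/example"          # placeholder registry
extra_args = {"version": "1.2.3"}     # caller-owned dict

# Same merge pattern as build_image_generic: copy-and-extend, no in-place update.
build_args = {**extra_args, "quay_registry": registry}

print(build_args)  # {'version': '1.2.3', 'quay_registry': 'quay.io/example'}
print(extra_args)  # {'version': '1.2.3'}, the caller's dict is unchanged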
@@ -352,26 +319,17 @@ def build_readiness_probe_image(build_configuration: ImageBuildConfiguration):
     Builds image used for readiness probe.
     """

-    version = build_configuration.version
     golang_version = os.getenv("GOLANG_VERSION", "1.24")

-    # Extract architectures from platforms for build args
-    architectures = [platform.split("/")[-1] for platform in build_configuration.platforms]
-    multi_arch_args_list = []
-
-    for arch in architectures:
-        arch_args = {
-            "version": version,
-            "GOLANG_VERSION": golang_version,
-            "architecture": arch,
-            "TARGETARCH": arch,  # TODO: redundant?
-        }
-        multi_arch_args_list.append(arch_args)
+    extra_args = {
+        "version": build_configuration.version,
+        "GOLANG_VERSION": golang_version,
+    }

     build_image_generic(
         dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile",
         build_configuration=build_configuration,
-        multi_arch_args_list=multi_arch_args_list,
+        extra_args=extra_args,
     )

@@ -380,26 +338,17 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration):
     Builds image used for version upgrade post-start hook.
     """

-    version = build_configuration.version
     golang_version = os.getenv("GOLANG_VERSION", "1.24")

-    # Extract architectures from platforms for build args
-    architectures = [platform.split("/")[-1] for platform in build_configuration.platforms]
-    multi_arch_args_list = []
-
-    for arch in architectures:
-        arch_args = {
-            "version": version,
-            "GOLANG_VERSION": golang_version,
-            "architecture": arch,
-            "TARGETARCH": arch,  # TODO: redundant?
-        }
-        multi_arch_args_list.append(arch_args)
+    extra_args = {
+        "version": build_configuration.version,
+        "GOLANG_VERSION": golang_version,
+    }

     build_image_generic(
         dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile",
         build_configuration=build_configuration,
-        multi_arch_args_list=multi_arch_args_list,
+        extra_args=extra_args,
     )

@@ -434,55 +383,6 @@ def build_agent_pipeline(
     )


-def build_multi_arch_agent_in_sonar(
-    build_configuration: ImageBuildConfiguration,
-    image_version,
-    tools_version,
-):
-    """
-    Creates the multi-arch non-operator suffixed version of the agent.
-    This is a drop-in replacement for the agent
-    release from MCO.
-    This should only be called during releases.
-    Which will lead to a release of the multi-arch
-    images to quay and ecr.
-    """
-
-    logger.info(f"building multi-arch base image for: {image_version}")
-    args = {
-        "version": image_version,
-        "tools_version": tools_version,
-    }
-
-    arch_arm = {
-        "agent_distro": "amzn2_aarch64",
-        "tools_distro": get_tools_distro(tools_version=tools_version)["arm"],
-        "architecture": "arm64",
-    }
-    arch_amd = {
-        "agent_distro": "rhel9_x86_64",
-        "tools_distro": get_tools_distro(tools_version=tools_version)["amd"],
-        "architecture": "amd64",
-    }
-
-    new_rhel_tool_version = "100.10.0"
-    if Version(tools_version) >= Version(new_rhel_tool_version):
-        arch_arm["tools_distro"] = "rhel93-aarch64"
-        arch_amd["tools_distro"] = "rhel93-x86_64"
-
-    joined_args = [args | arch_amd]
-
-    # Only include arm64 if we shouldn't skip it
-    if not should_skip_arm64():
-        joined_args.append(args | arch_arm)
-
-    build_image_generic(
-        dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile",
-        build_configuration=build_configuration,
-        multi_arch_args_list=joined_args,
-    )
-
-
 def build_agent_default_case(build_configuration: ImageBuildConfiguration):
     """
     Build the agent only for the latest operator for patches and operator releases.
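
The tools_distro switch in the removed helper leans on packaging.version.Version, which compares release segments numerically rather than lexically. A small self-contained check of why that matters for the "100.10.0" cutoff:

from packaging.version import Version

# Version orders release segments numerically, so 100.10.0 > 100.9.4,
# while a plain string comparison would order them the other way around.
assert Version("100.10.0") > Version("100.9.4")
assert "100.10.0" < "100.9.4"  # lexical comparison gets it wrong

new_rhel_tool_version = "100.10.0"
use_rhel93_tools = Version("100.9.4") >= Version(new_rhel_tool_version)
print(use_rhel93_tools)  # False: a 100.9.x agent keeps the older tools_distro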
@@ -511,10 +411,10 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration):
     with ProcessPoolExecutor(max_workers=max_workers) as executor:
         logger.info(f"running with factor of {max_workers}")
         print(f"======= Versions to build {agent_versions_to_build} =======")
-        for agent_version in agent_versions_to_build:
+        for idx, agent_version in enumerate(agent_versions_to_build):
             # We don't need to keep create and push the same image on every build.
             # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay.
-            print(f"======= Building Agent {agent_version} =======")
+            print(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})")
             _build_agent_operator(
                 agent_version,
                 build_configuration,
@@ -526,76 +426,6 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration):
     queue_exception_handling(tasks_queue)


-def build_agent_on_agent_bump(build_configuration: ImageBuildConfiguration):
-    """
-    Build the agent matrix (operator version x agent version), triggered by PCT.
-
-    We have three cases where we need to build the agent:
-    - e2e test runs
-    - operator releases
-    - OM/CM bumps via PCT
-
-    We don’t require building a full matrix on e2e test runs and operator releases.
-    "Operator releases" and "e2e test runs" require only the latest operator x agents
-
-    In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well.
-    This function takes care of that.
-    """
-    release = load_release_file()
-    is_release = build_configuration.is_release_scenario()
-
-    if build_configuration.all_agents:
-        # We need to release [all agents x latest operator] on operator releases to make e2e tests work
-        # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960
-        agent_versions_to_build = gather_all_supported_agent_versions(release)
-    else:
-        # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore.
-        agent_versions_to_build = gather_latest_agent_versions(release)
-
-    legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"]
-
-    tasks_queue = Queue()
-    max_workers = 1
-    if build_configuration.parallel:
-        max_workers = None
-        if build_configuration.parallel_factor > 0:
-            max_workers = build_configuration.parallel_factor
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        logger.info(f"running with factor of {max_workers}")
-
-        # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them.
-        # We only need to push them once in a while to ecr, so no quay required
-        if not is_release:
-            for legacy_agent in legacy_agent_versions_to_build:
-                tasks_queue.put(
-                    executor.submit(
-                        build_multi_arch_agent_in_sonar,
-                        build_configuration,
-                        legacy_agent,
-                        # we assume that all legacy agents are build using that tools version
-                        "100.9.4",
-                    )
-                )
-
-        for agent_version in agent_versions_to_build:
-            # We don't need to keep create and push the same image on every build.
-            # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay.
-            if build_configuration.all_agents:
-                tasks_queue.put(
-                    executor.submit(
-                        build_multi_arch_agent_in_sonar,
-                        build_configuration,
-                        agent_version[0],
-                        agent_version[1],
-                    )
-                )
-            for operator_version in get_supported_operator_versions():
-                logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}")
-                _build_agent_operator(agent_version, build_configuration, executor, operator_version, tasks_queue)
-
-    queue_exception_handling(tasks_queue)
-
-
 def queue_exception_handling(tasks_queue):
     exceptions_found = False
     for task in tasks_queue.queue:
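
The agent builders above all follow the same submit-then-drain pattern: futures are dropped into a plain Queue while the ProcessPoolExecutor runs, and queue_exception_handling walks them afterwards. A minimal standalone sketch of that pattern, with a stand-in task; the reporting at the end is assumed, since the function body is cut off in this hunk:

from concurrent.futures import ProcessPoolExecutor
from queue import Queue


def flaky_build(version: str) -> str:
    # Stand-in for a single image build task.
    if version == "bad":
        raise RuntimeError(f"build failed for {version}")
    return f"built {version}"


if __name__ == "__main__":
    tasks_queue = Queue()
    with ProcessPoolExecutor(max_workers=2) as executor:
        for version in ["1.0.0", "bad", "1.1.0"]:
            tasks_queue.put(executor.submit(flaky_build, version))

    # Assumed drain step: inspect each completed future and surface any failures.
    exceptions_found = False
    for task in tasks_queue.queue:
        if task.exception() is not None:
            exceptions_found = True
            print(f"task failed: {task.exception()}")
    if exceptions_found:
        raise RuntimeError("one or more build tasks failed")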