@@ -267,7 +267,6 @@ def handle_maven_uploading(
     * repo is the location of the tarball in filesystem
     * prod_key is used to identify which product this repo
       tar belongs to
-    * ga is used to identify if this is a GA product release
     * ignore_patterns is used to filter out paths which don't
       need to upload in the tarball
     * root is a prefix in the tarball to identify which path is
@@ -303,22 +302,19 @@ def handle_maven_uploading(
         _handle_error(err_msgs)
         # Question: should we exit here?

-    main_target = targets[0]
-    main_bucket_prefix = remove_prefix(main_target[2], "/")
-    succeeded = True
     # 4. Do uploading
     logger.info("Start uploading files to s3")
     s3_client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
-    bucket = main_target[1]
+    targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
     failed_files = s3_client.upload_files(
         file_paths=valid_mvn_paths,
-        target=(bucket, main_bucket_prefix),
+        targets=targets_,
         product=prod_key,
         root=top_level
     )
     logger.info("Files uploading done\n")

-    all_failed_metas: Dict[str, List[str]] = {}
+    succeeded = True
     for target in targets:
         # 5. Do manifest uploading
         logger.info("Start uploading manifest to s3")
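The upload step now fans out to every configured target instead of only `targets[0]`: each target is flattened to a `(bucket, prefix)` pair and the whole list goes to `S3Client.upload_files` in one call. A minimal sketch of the fan-out semantics, assuming hypothetical `(name, bucket, prefix)` target tuples and a stand-in `put_object` callable (the real client with its retry and checksum handling lives in `S3Client`):

```python
from typing import Callable, List, Tuple

def upload_files_sketch(
    file_paths: List[str],
    targets: List[Tuple[str, str]],               # (bucket, prefix) pairs, as built above
    put_object: Callable[[str, str, str], bool],  # hypothetical uploader stub
) -> List[str]:
    """Upload every file once per target; collect paths that failed anywhere."""
    failed: List[str] = []
    for bucket, prefix in targets:
        for path in file_paths:
            key = f"{prefix}/{path}" if prefix else path
            if not put_object(bucket, key, path):
                failed.append(path)
    return failed

# Toy run: the second target rejects everything, so both paths are reported failed
targets_ = [("prod-maven-ga", "ga"), ("prod-maven-ea", "ea")]
ok = lambda bucket, key, path: bucket != "prod-maven-ea"
print(upload_files_sketch(["g/a/v/a-1.jar", "g/a/v/a-1.pom"], targets_, ok))
```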
@@ -411,7 +407,6 @@ def handle_maven_uploading(

         upload_post_process(failed_files, failed_metas, prod_key, bucket_)
         succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
-        all_failed_metas[target] = failed_metas

     return (tmp_root, succeeded)

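With `all_failed_metas` gone, the loop simply AND-folds each target's outcome into the single `succeeded` flag, so one bad target marks the whole run as failed. A tiny sketch of that fold over toy per-target results:

```python
# Toy per-target results: (failed_files, failed_metas) after post-processing
per_target = [([], []), (["a-1.jar"], [])]  # second target had an upload failure

succeeded = True
for failed_files, failed_metas in per_target:
    succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
assert not succeeded  # any failure anywhere flips the flag for good
```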
@@ -432,12 +427,13 @@ def handle_maven_del(
     * repo is the location of the tarball in filesystem
     * prod_key is used to identify which product this repo
       tar belongs to
-    * ga is used to identify if this is a GA product release
     * ignore_patterns is used to filter out paths which don't
       need to upload in the tarball
     * root is a prefix in the tarball to identify which path is
       the beginning of the maven GAV path
-    * bucket_name is the s3 bucket name to store the artifacts
+    * targets contains the target names, each with its bucket name and
+      the prefix under which artifacts will be stored in that bucket.
+      See the target definition in the Charon configuration for details
     * dir is base dir for extracting the tarball, will use system
       tmp dir if None.

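The code never names the tuple fields, but the indexing in both handlers (`target[1]` for the bucket, `target[2]` for the prefix) suggests a shape like the following; the values here are purely hypothetical, the real ones come from the Charon target configuration:

```python
# Hypothetical targets as consumed by handle_maven_uploading / handle_maven_del.
# Observed in the diff: target[1] -> bucket name, target[2] -> bucket prefix.
targets = [
    ("ga", "prod-maven-ga", "/ga"),  # (name, bucket, prefix)
    ("ea", "prod-maven-ea", "/ea"),
]
for name, bucket, prefix in targets:
    print(f"target {name!r}: artifacts stored at s3://{bucket}/{prefix.strip('/')}/")
```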
@@ -505,7 +501,8 @@ def handle_maven_del(
             product=None,
             root=top_level
         )
-        failed_metas.extend(_failed_metas)
+        if len(_failed_metas) > 0:
+            failed_metas.extend(_failed_metas)
         logger.info("maven-metadata.xml updating done\n")

         # 7. Determine refreshment of archetype-catalog.xml
@@ -529,15 +526,17 @@ def handle_maven_del(
                 product=None,
                 root=top_level
             )
-            failed_metas.extend(_failed_metas)
+            if len(_failed_metas) > 0:
+                failed_metas.extend(_failed_metas)
         elif archetype_action > 0:
             (_, _failed_metas) = s3_client.upload_metadatas(
                 meta_file_paths=archetype_files,
                 target=(bucket, prefix_),
                 product=None,
                 root=top_level
             )
-            failed_metas.extend(_failed_metas)
+            if len(_failed_metas) > 0:
+                failed_metas.extend(_failed_metas)
         logger.info("archetype-catalog.xml updating done\n")

         if do_index:
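`archetype_action` comes from `_generate_rollback_archetype_catalog` (further down in this diff), whose docstring encodes the decision as a return code: negative means delete the now-empty bucket catalog, positive means upload the re-merged one, and 0 means take no action. A self-contained sketch of that three-way dispatch, with stub callables standing in for the real delete and upload steps:

```python
from typing import Callable, List

def dispatch_archetype_action(
    archetype_action: int,
    delete_catalog: Callable[[], List[str]],  # stub for the bucket delete step
    upload_catalog: Callable[[], List[str]],  # stub for upload_metadatas(...)
) -> List[str]:
    """Return the failed-meta list produced by whichever branch runs."""
    if archetype_action < 0:
        return delete_catalog()   # -1: merged catalog is now empty, remove it
    elif archetype_action > 0:
        return upload_catalog()   # >0: re-merged catalog must be uploaded
    return []                     # 0: take no action

# Toy usage with stubs:
assert dispatch_archetype_action(0, lambda: ["d"], lambda: ["u"]) == []
assert dispatch_archetype_action(2, lambda: ["d"], lambda: ["u"]) == ["u"]
```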
@@ -554,13 +553,14 @@ def handle_maven_del(
                 product=None,
                 root=top_level
             )
-            failed_metas.extend(_failed_index_files)
+            if len(_failed_index_files) > 0:
+                failed_metas.extend(_failed_index_files)
             logger.info("Index files updating done.\n")
         else:
             logger.info("Bypassing indexing")

         rollback_post_process(failed_files, failed_metas, prod_key, bucket)
-        succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
+        succeeded = succeeded and len(failed_files) == 0 and len(failed_metas) == 0

     return (tmp_root, succeeded)

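Two small semantics notes on the pattern above, both verifiable in isolation: `list.extend([])` is already a no-op, so the added `if len(...) > 0` guards are defensive readability changes rather than behavior fixes; and since `len()` is never negative, switching `<= 0` to `== 0` is equivalent but states the intent directly:

```python
failed_metas: list = []
failed_metas.extend([])  # no-op even without the guard
assert failed_metas == []

failed_files: list = []
# len() >= 0 always, so "<= 0" and "== 0" agree for any list
assert (len(failed_files) <= 0) == (len(failed_files) == 0)
succeeded = len(failed_files) == 0 and len(failed_metas) == 0
assert succeeded
```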
@@ -661,14 +661,21 @@ def _generate_rollback_archetype_catalog(
     - -1 - DELETE the (now empty) bucket catalog
     - 0 - take no action
     """
-    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
     if prefix:
         remote = os.path.join(prefix, ARCHETYPE_CATALOG_FILENAME)
     else:
         remote = ARCHETYPE_CATALOG_FILENAME
+    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
+    # As the local archetype will be overwritten later, we must keep
+    # a cache of the original local file for multi-targets support
+    local_bak = os.path.join(root, ARCHETYPE_CATALOG_FILENAME + ".charon.bak")
+    if os.path.exists(local) and not os.path.exists(local_bak):
+        with open(local, "rb") as f:
+            with open(local_bak, "w+", encoding="utf-8") as fl:
+                fl.write(str(f.read(), encoding="utf-8"))

     # If there is no local catalog, this is a NO-OP
-    if os.path.exists(local):
+    if os.path.exists(local_bak):
         existed = False
         try:
             existed = s3.file_exists_in_bucket(bucket, remote)
@@ -682,7 +689,7 @@ def _generate_rollback_archetype_catalog(
             return 0
         else:
             # If there IS a catalog in the bucket, we need to merge or un-merge it.
-            with open(local, "rb") as f:
+            with open(local_bak, "rb") as f:
                 try:
                     local_archetypes = _parse_archetypes(f.read())
                 except ElementTree.ParseError:
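Both catalog generators now snapshot the pristine local `archetype-catalog.xml` to a `.charon.bak` sidecar before the first target's merge overwrites it, so every subsequent target merges against the original file rather than an already-merged one. The decode/encode round trip through `str(f.read(), encoding="utf-8")` could equally be a plain byte copy; a minimal sketch of that alternative under the same existence guard:

```python
import os
import shutil

ARCHETYPE_CATALOG_FILENAME = "archetype-catalog.xml"

def cache_original_catalog(root: str) -> str:
    """Snapshot the pristine catalog once so later targets see the original."""
    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
    local_bak = local + ".charon.bak"
    if os.path.exists(local) and not os.path.exists(local_bak):
        # byte-for-byte copy; avoids assuming the catalog decodes as UTF-8
        shutil.copyfile(local, local_bak)
    return local_bak
```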
@@ -769,14 +776,21 @@ def _generate_upload_archetype_catalog(
     available in the bucket. Merge (or unmerge) these catalogs and
     return a boolean indicating whether the local file should be uploaded.
     """
-    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
     if prefix:
         remote = os.path.join(prefix, ARCHETYPE_CATALOG_FILENAME)
     else:
         remote = ARCHETYPE_CATALOG_FILENAME
+    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
+    # As the local archetype will be overwritten later, we must keep
+    # a cache of the original local file for multi-targets support
+    local_bak = os.path.join(root, ARCHETYPE_CATALOG_FILENAME + ".charon.bak")
+    if os.path.exists(local) and not os.path.exists(local_bak):
+        with open(local, "rb") as f:
+            with open(local_bak, "w+", encoding="utf-8") as fl:
+                fl.write(str(f.read(), encoding="utf-8"))

     # If there is no local catalog, this is a NO-OP
-    if os.path.exists(local):
+    if os.path.exists(local_bak):
         existed = False
         try:
             existed = s3.file_exists_in_bucket(bucket, remote)
@@ -785,7 +799,7 @@ def _generate_upload_archetype_catalog(
785799 "Error: Can not generate archtype-catalog.xml due to: %s" , e
786800 )
787801 return 0
788- if not existed :
802+ if not existed
789803 __gen_all_digest_files (local )
790804 # If there is no catalog in the bucket, just upload what we have locally
791805 return True
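`__gen_all_digest_files` is not shown in this diff; going by its name and Maven repository conventions, it presumably writes checksum sidecars (`.md5`, `.sha1`) next to the catalog before upload. A generic sketch of that idea, not Charon's actual implementation:

```python
import hashlib

def gen_digest_files_sketch(path: str) -> None:
    """Write .md5 and .sha1 sidecar files for `path`, Maven-repository style."""
    with open(path, "rb") as f:
        data = f.read()
    for algo in ("md5", "sha1"):
        digest = hashlib.new(algo, data).hexdigest()
        with open(f"{path}.{algo}", "w", encoding="utf-8") as out:
            out.write(digest)
```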