Commit bd90fdf

Feat: multi-targets support: s3 functions change
1 parent f8ced7e commit bd90fdf

File tree

4 files changed: +198 -92 lines changed


charon/pkgs/maven.py

Lines changed: 35 additions & 21 deletions
@@ -267,7 +267,6 @@ def handle_maven_uploading(
 * repo is the location of the tarball in filesystem
 * prod_key is used to identify which product this repo
   tar belongs to
-* ga is used to identify if this is a GA product release
 * ignore_patterns is used to filter out paths which don't
   need to upload in the tarball
 * root is a prefix in the tarball to identify which path is
@@ -303,22 +302,19 @@ def handle_maven_uploading(
 _handle_error(err_msgs)
 # Question: should we exit here?

-main_target = targets[0]
-main_bucket_prefix = remove_prefix(main_target[2], "/")
-succeeded = True
 # 4. Do uploading
 logger.info("Start uploading files to s3")
 s3_client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
-bucket = main_target[1]
+targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
 failed_files = s3_client.upload_files(
     file_paths=valid_mvn_paths,
-    target=(bucket, main_bucket_prefix),
+    targets=targets_,
     product=prod_key,
     root=top_level
 )
 logger.info("Files uploading done\n")

-all_failed_metas: Dict[str, List[str]] = {}
+succeeded = True
 for target in targets:
     # 5. Do manifest uploading
     logger.info("Start uploading manifest to s3")
@@ -411,7 +407,6 @@ def handle_maven_uploading(

 upload_post_process(failed_files, failed_metas, prod_key, bucket_)
 succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
-all_failed_metas[target] = failed_metas

 return (tmp_root, succeeded)
@@ -432,12 +427,13 @@ def handle_maven_del(
 * repo is the location of the tarball in filesystem
 * prod_key is used to identify which product this repo
   tar belongs to
-* ga is used to identify if this is a GA product release
 * ignore_patterns is used to filter out paths which don't
   need to upload in the tarball
 * root is a prefix in the tarball to identify which path is
   the beginning of the maven GAV path
-* bucket_name is the s3 bucket name to store the artifacts
+* targets contains the target name with its bucket name and prefix
+  for the bucket, which will be used to store artifacts with the
+  prefix. See target definition in Charon configuration for details
 * dir is base dir for extracting the tarball, will use system
   tmp dir if None.
@@ -505,7 +501,8 @@ def handle_maven_del(
     product=None,
     root=top_level
 )
-failed_metas.extend(_failed_metas)
+if len(_failed_metas) > 0:
+    failed_metas.extend(_failed_metas)
 logger.info("maven-metadata.xml updating done\n")

 # 7. Determine refreshment of archetype-catalog.xml
@@ -529,15 +526,17 @@ def handle_maven_del(
         product=None,
         root=top_level
     )
-    failed_metas.extend(_failed_metas)
+    if len(_failed_metas) > 0:
+        failed_metas.extend(_failed_metas)
 elif archetype_action > 0:
     (_, _failed_metas) = s3_client.upload_metadatas(
         meta_file_paths=archetype_files,
         target=(bucket, prefix_),
         product=None,
         root=top_level
     )
-    failed_metas.extend(_failed_metas)
+    if len(_failed_metas) > 0:
+        failed_metas.extend(_failed_metas)
 logger.info("archetype-catalog.xml updating done\n")

 if do_index:
@@ -554,13 +553,14 @@ def handle_maven_del(
         product=None,
         root=top_level
     )
-    failed_metas.extend(_failed_index_files)
+    if len(_failed_index_files) > 0:
+        failed_metas.extend(_failed_index_files)
     logger.info("Index files updating done.\n")
 else:
     logger.info("Bypassing indexing")

 rollback_post_process(failed_files, failed_metas, prod_key, bucket)
-succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
+succeeded = succeeded and len(failed_files) == 0 and len(failed_metas) == 0

 return (tmp_root, succeeded)
@@ -661,14 +661,21 @@ def _generate_rollback_archetype_catalog(
     - -1 - DELETE the (now empty) bucket catalog
     - 0 - take no action
 """
-local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
 if prefix:
     remote = os.path.join(prefix, ARCHETYPE_CATALOG_FILENAME)
 else:
     remote = ARCHETYPE_CATALOG_FILENAME
+local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
+# As the local archetype will be overwrittern later, we must keep
+# a cache of the original local for multi-targets support
+local_bak = os.path.join(root, ARCHETYPE_CATALOG_FILENAME + ".charon.bak")
+if os.path.exists(local) and not os.path.exists(local_bak):
+    with open(local, "rb") as f:
+        with open(local_bak, "w+", encoding="utf-8") as fl:
+            fl.write(str(f.read(), encoding="utf-8"))

 # If there is no local catalog, this is a NO-OP
-if os.path.exists(local):
+if os.path.exists(local_bak):
     existed = False
     try:
         existed = s3.file_exists_in_bucket(bucket, remote)
@@ -682,7 +689,7 @@ _generate_rollback_archetype_catalog(
     return 0
 else:
     # If there IS a catalog in the bucket, we need to merge or un-merge it.
-    with open(local, "rb") as f:
+    with open(local_bak, "rb") as f:
         try:
             local_archetypes = _parse_archetypes(f.read())
         except ElementTree.ParseError:
@@ -769,14 +776,21 @@ def _generate_upload_archetype_catalog(
     available in the bucket. Merge (or unmerge) these catalogs and
     return a boolean indicating whether the local file should be uploaded.
 """
-local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
 if prefix:
     remote = os.path.join(prefix, ARCHETYPE_CATALOG_FILENAME)
 else:
     remote = ARCHETYPE_CATALOG_FILENAME
+local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
+# As the local archetype will be overwrittern later, we must keep
+# a cache of the original local for multi-targets support
+local_bak = os.path.join(root, ARCHETYPE_CATALOG_FILENAME + ".charon.bak")
+if os.path.exists(local) and not os.path.exists(local_bak):
+    with open(local, "rb") as f:
+        with open(local_bak, "w+", encoding="utf-8") as fl:
+            fl.write(str(f.read(), encoding="utf-8"))

 # If there is no local catalog, this is a NO-OP
-if os.path.exists(local):
+if os.path.exists(local_bak):
     existed = False
     try:
         existed = s3.file_exists_in_bucket(bucket, remote)
@@ -785,7 +799,7 @@ _generate_upload_archetype_catalog(
         "Error: Can not generate archtype-catalog.xml due to: %s", e
     )
     return 0
-if not existed:
+if not existed:
     __gen_all_digest_files(local)
     # If there is no catalog in the bucket, just upload what we have locally
     return True
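
Note on the archetype-catalog changes above: because the local archetype-catalog.xml is merged and overwritten when the first target is processed, both generators now keep a one-time ".charon.bak" copy and read that copy for every later target. Below is a minimal standalone sketch of that caching step, assuming only what the diff shows; the helper name cache_original_catalog is illustrative and not part of this commit.

import os

ARCHETYPE_CATALOG_FILENAME = "archetype-catalog.xml"


def cache_original_catalog(root: str) -> str:
    # Keep a one-time backup of the local catalog so every target merges
    # against the same original file rather than an already-merged one.
    local = os.path.join(root, ARCHETYPE_CATALOG_FILENAME)
    local_bak = local + ".charon.bak"
    if os.path.exists(local) and not os.path.exists(local_bak):
        with open(local, "rb") as f, open(local_bak, "w+", encoding="utf-8") as fl:
            fl.write(str(f.read(), encoding="utf-8"))
    return local_bak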

charon/pkgs/npm.py

Lines changed: 21 additions & 12 deletions
@@ -78,7 +78,9 @@ def handle_npm_uploading(
 * tarball_path is the location of the tarball in filesystem
 * product is used to identify which product this repo
   tar belongs to
-* bucket_name is the s3 bucket name to store the artifacts
+* targets contains the target name with its bucket name and prefix
+  for the bucket, which will be used to store artifacts with the
+  prefix. See target definition in Charon configuration for details
 * dir_ is base dir for extracting the tarball, will use system
   tmp dir if None.
@@ -93,19 +95,19 @@ def handle_npm_uploading(

 valid_dirs = __get_path_tree(valid_paths, target_dir)

-main_target = targets[0]
+# main_target = targets[0]
 logger.info("Start uploading files to s3")
 client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
-bucket = main_target[1]
-prefix_ = remove_prefix(main_target[2], "/")
+targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
 failed_files = client.upload_files(
     file_paths=valid_paths,
-    target=(bucket, prefix_),
+    targets=targets_,
     product=product,
     root=target_dir
 )
 logger.info("Files uploading done\n")

+succeeded = True
 for target in targets:
     manifest_folder = target[0]
     logger.info("Start uploading manifest to s3")
@@ -162,8 +164,9 @@ def handle_npm_uploading(
 logger.info("Bypass indexing\n")

 upload_post_process(failed_files, failed_metas, product, bucket_)
-succeeded = len(failed_files) <= 0 and len(failed_metas) <= 0
-return (target_dir, succeeded)
+succeeded = succeeded and len(failed_files) == 0 and len(failed_metas) == 0
+
+return (target_dir, succeeded)


 def handle_npm_del(
@@ -180,7 +183,9 @@ def handle_npm_del(
 * tarball_path is the location of the tarball in filesystem
 * product is used to identify which product this repo
   tar belongs to
-* bucket_name is the s3 bucket name to store the artifacts
+* targets contains the target name with its bucket name and prefix
+  for the bucket, which will be used to store artifacts with the
+  prefix. See target definition in Charon configuration for details
 * dir is base dir for extracting the tarball, will use system
   tmp dir if None.
@@ -194,17 +199,19 @@ def handle_npm_del(

 logger.info("Start deleting files from s3")
 client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
+succeeded = True
 for target in targets:
     bucket = target[1]
     prefix_ = remove_prefix(target[2], "/")
-    manifest_folder = target[0]
+
     _, failed_files = client.delete_files(
         file_paths=valid_paths,
         target=(bucket, prefix_),
         product=product, root=target_dir
     )
     logger.info("Files deletion done\n")

+    manifest_folder = target[0]
     logger.info("Start deleting manifest from s3")
     client.delete_manifest(product, manifest_folder, manifest_bucket_name)
     logger.info("Manifest deletion is done\n")
@@ -255,8 +262,9 @@ def handle_npm_del(
 logger.info("Bypassing indexing\n")

 rollback_post_process(failed_files, failed_metas, product, bucket)
-succeeded = len(failed_files) <= 0 and len(failed_metas) <= 0
-return (target_dir, succeeded)
+succeeded = succeeded and len(failed_files) <= 0 and len(failed_metas) <= 0
+
+return (target_dir, succeeded)


 def read_package_metadata_from_content(content: str, is_version) -> NPMPackageMetadata:
@@ -326,7 +334,8 @@ def _gen_npm_package_metadata_for_del(
 logger.warning("Error to get remote metadata files "
                "for %s when deletion", path_prefix)
 # ensure the metas only contain version package.json
-existed_version_metas.remove(prefix_meta_key)
+if prefix_meta_key in existed_version_metas:
+    existed_version_metas.remove(prefix_meta_key)
 # Still have versions in S3 and need to maintain the package metadata
 if len(existed_version_metas) > 0:
     logger.debug("Read all version package.json content from S3")
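
Note on the multi-target pattern used in both maven.py and npm.py above: the configured targets are mapped to (bucket, prefix) pairs that are passed to S3Client.upload_files(targets=...), and a single success flag is AND-ed across the per-target results. A minimal sketch of that mapping and aggregation, assuming each target tuple reads as (name, bucket, prefix) the way the code above indexes it; the sample target values and the local remove_prefix helper are illustrative only, not from any real Charon configuration.

def remove_prefix(s: str, prefix: str) -> str:
    # Stand-in for the helper the package modules import.
    return s[len(prefix):] if s.startswith(prefix) else s


# Hypothetical targets, purely for illustration.
targets = [
    ("prod-npm", "prod-npm-bucket", "/npm"),
    ("stage-npm", "stage-npm-bucket", "/npm"),
]

# (bucket, prefix) pairs handed to S3Client.upload_files(targets=...).
targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
assert targets_ == [("prod-npm-bucket", "npm"), ("stage-npm-bucket", "npm")]

# The overall result stays True only if every target finishes cleanly.
succeeded = True
for target in targets:
    failed_files = []  # would be returned by the per-target upload
    failed_metas = []  # would be returned by the per-target metadata refresh
    succeeded = succeeded and len(failed_files) == 0 and len(failed_metas) == 0
print(succeeded)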
