
Commit 1936bc3

Feat: multi-targets support: unit tests

1 parent bd90fdf commit 1936bc3

12 files changed: +1709 -78 lines

charon/pkgs/maven.py

Lines changed: 26 additions & 17 deletions
@@ -303,9 +303,12 @@ def handle_maven_uploading(
         # Question: should we exit here?

     # 4. Do uploading
-    logger.info("Start uploading files to s3")
     s3_client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
     targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
+    logger.info(
+        "Start uploading files to s3 buckets: %s",
+        [target[0] for target in targets_]
+    )
     failed_files = s3_client.upload_files(
         file_paths=valid_mvn_paths,
         targets=targets_,
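This hunk is the heart of the multi-target change: each entry in targets is indexed as (manifest_folder, bucket, prefix), and targets_ narrows it to (bucket, prefix) pairs, so target[0] in the new log call is the bucket name. A minimal self-contained sketch under that assumption (the example values are hypothetical, and remove_prefix is re-stated here only for illustration):

    # Sketch only: assumes target tuples indexed as (manifest_folder, bucket, prefix),
    # matching the target[0]/target[1]/target[2] usage in the diff above.
    def remove_prefix(s: str, prefix: str) -> str:
        # Same behavior the diff relies on: drop a leading prefix when present.
        return s[len(prefix):] if s.startswith(prefix) else s

    targets = [
        ("ga-manifests", "prod-maven-ga", "/ga"),  # hypothetical values
        ("ea-manifests", "prod-maven-ea", "/ea"),
    ]
    targets_ = [(t[1], remove_prefix(t[2], "/")) for t in targets]
    # The new log message prints just the bucket names:
    assert [t[0] for t in targets_] == ["prod-maven-ga", "prod-maven-ea"]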
@@ -317,8 +320,8 @@ def handle_maven_uploading(
     succeeded = True
     for target in targets:
         # 5. Do manifest uploading
-        logger.info("Start uploading manifest to s3")
         manifest_folder = target[0]
+        logger.info("Start uploading manifest to s3 bucket %s", manifest_bucket_name)
         if not manifest_bucket_name:
             logger.warning(
                 'Warning: No manifest bucket is provided, will ignore the process of manifest '
@@ -347,7 +350,7 @@ def handle_maven_uploading(
         # 7. Upload all maven-metadata.xml
         if META_FILE_GEN_KEY in meta_files:
             logger.info("Start updating maven-metadata.xml to s3 bucket %s", bucket_)
-            (_, _failed_metas) = s3_client.upload_metadatas(
+            _failed_metas = s3_client.upload_metadatas(
                 meta_file_paths=meta_files[META_FILE_GEN_KEY],
                 target=(bucket_, prefix__),
                 product=None,
@@ -373,7 +376,7 @@ def handle_maven_uploading(
                 __hash_decorate_metadata(top_level, ARCHETYPE_CATALOG_FILENAME)
             )
             logger.info("Start updating archetype-catalog.xml to s3 bucket %s", bucket_)
-            (_, _failed_metas) = s3_client.upload_metadatas(
+            _failed_metas = s3_client.upload_metadatas(
                 meta_file_paths=archetype_files,
                 target=(bucket_, prefix__),
                 product=None,
@@ -394,7 +397,7 @@ def handle_maven_uploading(
             logger.info("Index files generation done.\n")

             logger.info("Start updating index files to s3 bucket %s", bucket_)
-            (_, _failed_metas) = s3_client.upload_metadatas(
+            _failed_metas = s3_client.upload_metadatas(
                 meta_file_paths=created_indexes,
                 target=(bucket_, prefix__),
                 product=None,
@@ -454,10 +457,10 @@ def handle_maven_del(
     succeeded = True
     for target in targets:
         prefix_ = remove_prefix(target[2], "/")
-        logger.info("Start deleting files from s3")
         s3_client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
         bucket = target[1]
-        (_, failed_files) = s3_client.delete_files(
+        logger.info("Start deleting files from s3 bucket %s", bucket)
+        failed_files = s3_client.delete_files(
             valid_mvn_paths,
             target=(bucket, prefix_),
             product=prod_key,
@@ -467,12 +470,18 @@ def handle_maven_del(

         # 4. Delete related manifest from s3
         manifest_folder = target[0]
-        logger.info("Start deleting manifest from s3")
+        logger.info(
+            "Start deleting manifest from s3 bucket %s in folder %s",
+            manifest_bucket_name, manifest_folder
+        )
         s3_client.delete_manifest(prod_key, manifest_folder, manifest_bucket_name)
         logger.info("Manifest deletion is done\n")

         # 5. Use changed GA to scan s3 for metadata refreshment
-        logger.info("Start generating maven-metadata.xml files for all changed GAs")
+        logger.info(
+            "Start generating maven-metadata.xml files for all changed GAs in s3 bucket %s",
+            bucket
+        )
         meta_files = _generate_metadatas(
             s3=s3_client, bucket=bucket,
             poms=valid_poms, root=top_level,
@@ -483,7 +492,7 @@ def handle_maven_del(

         # 6. Upload all maven-metadata.xml. We need to delete metadata files
         # firstly for all affected GA, and then replace the theirs content.
-        logger.info("Start updating maven-metadata.xml to s3")
+        logger.info("Start updating maven-metadata.xml to s3 bucket %s", bucket)
         all_meta_files = []
         for _, files in meta_files.items():
             all_meta_files.extend(files)
@@ -495,7 +504,7 @@ def handle_maven_del(
         )
         failed_metas = meta_files.get(META_FILE_FAILED, [])
         if META_FILE_GEN_KEY in meta_files:
-            (_, _failed_metas) = s3_client.upload_metadatas(
+            _failed_metas = s3_client.upload_metadatas(
                 meta_file_paths=meta_files[META_FILE_GEN_KEY],
                 target=(bucket, prefix_),
                 product=None,
@@ -519,8 +528,8 @@ def handle_maven_del(
         archetype_files = [os.path.join(top_level, ARCHETYPE_CATALOG_FILENAME)]
         archetype_files.extend(__hash_decorate_metadata(top_level, ARCHETYPE_CATALOG_FILENAME))
         if archetype_action < 0:
-            logger.info("Start updating archetype-catalog.xml to s3")
-            (_, _failed_metas) = s3_client.delete_files(
+            logger.info("Start updating archetype-catalog.xml to s3 bucket %s", bucket)
+            _failed_metas = s3_client.delete_files(
                 file_paths=archetype_files,
                 target=(bucket, prefix_),
                 product=None,
@@ -529,7 +538,7 @@ def handle_maven_del(
             if len(_failed_metas) > 0:
                 failed_metas.extend(_failed_metas)
         elif archetype_action > 0:
-            (_, _failed_metas) = s3_client.upload_metadatas(
+            _failed_metas = s3_client.upload_metadatas(
                 meta_file_paths=archetype_files,
                 target=(bucket, prefix_),
                 product=None,
@@ -546,8 +555,8 @@ def handle_maven_del(
             )
             logger.info("Index files generation done.\n")

-            logger.info("Start updating index to s3")
-            (_, _failed_index_files) = s3_client.upload_metadatas(
+            logger.info("Start updating index to s3 bucket %s", bucket)
+            _failed_index_files = s3_client.upload_metadatas(
                 meta_file_paths=created_indexes,
                 target=(bucket, prefix_),
                 product=None,
@@ -799,7 +808,7 @@ def _generate_upload_archetype_catalog(
             "Error: Can not generate archtype-catalog.xml due to: %s", e
         )
         return 0
-    if not existed
+    if not existed:
         __gen_all_digest_files(local)
         # If there is no catalog in the bucket, just upload what we have locally
         return True

charon/pkgs/npm.py

Lines changed: 31 additions & 17 deletions
@@ -96,9 +96,12 @@ def handle_npm_uploading(
     valid_dirs = __get_path_tree(valid_paths, target_dir)

     # main_target = targets[0]
-    logger.info("Start uploading files to s3")
     client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
     targets_ = [(target[1], remove_prefix(target[2], "/")) for target in targets]
+    logger.info(
+        "Start uploading files to s3 buckets: %s",
+        [target[0] for target in targets_]
+    )
     failed_files = client.upload_files(
         file_paths=valid_paths,
         targets=targets_,
@@ -110,7 +113,7 @@ def handle_npm_uploading(
     succeeded = True
     for target in targets:
         manifest_folder = target[0]
-        logger.info("Start uploading manifest to s3")
+        logger.info("Start uploading manifest to s3 bucket %s", manifest_bucket_name)
         if not manifest_bucket_name:
             logger.warning(
                 'Warning: No manifest bucket is provided, will ignore the process of manifest '
@@ -125,15 +128,18 @@ def handle_npm_uploading(

         bucket_ = target[1]
         prefix__ = remove_prefix(target[2], "/")
-        logger.info("Start generating package.json for package: %s", package_metadata.name)
+        logger.info(
+            "Start generating package.json for package: %s in s3 bucket %s",
+            package_metadata.name, bucket_
+        )
         meta_files = _gen_npm_package_metadata_for_upload(
             client, bucket_, target_dir, package_metadata, prefix__
         )
         logger.info("package.json generation done\n")

         failed_metas = []
         if META_FILE_GEN_KEY in meta_files:
-            _, _failed_metas = client.upload_metadatas(
+            _failed_metas = client.upload_metadatas(
                 meta_file_paths=[meta_files[META_FILE_GEN_KEY]],
                 target=(bucket_, prefix__),
                 product=None,
@@ -145,14 +151,14 @@ def handle_npm_uploading(
         # this step generates index.html for each dir and add them to file list
         # index is similar to metadata, it will be overwritten everytime
         if do_index:
-            logger.info("Start generating index files to s3")
+            logger.info("Start generating index files to s3 bucket %s", bucket_)
             created_indexes = indexing.generate_indexes(
                 PACKAGE_TYPE_NPM, target_dir, valid_dirs, client, bucket_, prefix__
             )
             logger.info("Index files generation done.\n")

-            logger.info("Start updating index files to s3")
-            (_, _failed_metas) = client.upload_metadatas(
+            logger.info("Start updating index files to s3 bucket %s", bucket_)
+            _failed_metas = client.upload_metadatas(
                 meta_file_paths=created_indexes,
                 target=(bucket_, prefix__),
                 product=None,
@@ -197,32 +203,37 @@ def handle_npm_del(

     valid_dirs = __get_path_tree(valid_paths, target_dir)

-    logger.info("Start deleting files from s3")
     client = S3Client(aws_profile=aws_profile, dry_run=dry_run)
     succeeded = True
     for target in targets:
         bucket = target[1]
         prefix_ = remove_prefix(target[2], "/")
-
-        _, failed_files = client.delete_files(
+        logger.info("Start deleting files from s3 bucket %s", bucket)
+        failed_files = client.delete_files(
             file_paths=valid_paths,
             target=(bucket, prefix_),
             product=product, root=target_dir
         )
         logger.info("Files deletion done\n")

         manifest_folder = target[0]
-        logger.info("Start deleting manifest from s3")
+        logger.info(
+            "Start deleting manifest from s3 bucket %s",
+            manifest_bucket_name
+        )
         client.delete_manifest(product, manifest_folder, manifest_bucket_name)
         logger.info("Manifest deletion is done\n")

-        logger.info("Start generating package.json for package: %s", package_name_path)
+        logger.info(
+            "Start generating package.json for package: %s in bucket %s",
+            package_name_path, bucket
+        )
         meta_files = _gen_npm_package_metadata_for_del(
             client, bucket, target_dir, package_name_path, prefix_
         )
         logger.info("package.json generation done\n")

-        logger.info("Start uploading package.json to s3")
+        logger.info("Start uploading package.json to s3 bucket %s", bucket)
         all_meta_files = []
         for _, file in meta_files.items():
             all_meta_files.append(file)
@@ -233,7 +244,7 @@ def handle_npm_del(
         )
         failed_metas = []
         if META_FILE_GEN_KEY in meta_files:
-            _, _failed_metas = client.upload_metadatas(
+            _failed_metas = client.upload_metadatas(
                 meta_file_paths=[meta_files[META_FILE_GEN_KEY]],
                 target=(bucket, prefix_),
                 product=None,
@@ -243,14 +254,17 @@ def handle_npm_del(
         logger.info("package.json uploading done")

         if do_index:
-            logger.info("Start generating index files for all changed entries")
+            logger.info(
+                "Start generating index files for all changed entries for bucket %s",
+                bucket
+            )
             created_indexes = indexing.generate_indexes(
                 PACKAGE_TYPE_NPM, target_dir, valid_dirs, client, bucket, prefix_
             )
             logger.info("Index files generation done.\n")

-            logger.info("Start updating index to s3")
-            (_, _failed_index_files) = client.upload_metadatas(
+            logger.info("Start updating index to s3 bucket %s", bucket)
+            _failed_index_files = client.upload_metadatas(
                 meta_file_paths=created_indexes,
                 target=(bucket, prefix_),
                 product=None,

charon/storage.py

Lines changed: 9 additions & 14 deletions
@@ -156,7 +156,7 @@ async def path_upload_handler(
             main_file_object: s3.Object = main_bucket.Object(main_path_key)
             existed = False
             try:
-                existed = await self.__run_async(self.__file_exists, file_object)
+                existed = await self.__run_async(self.__file_exists, main_file_object)
             except (ClientError, HTTPClientError) as e:
                 logger.error(
                     "Error: file existence check failed due to error: %s", e
@@ -258,8 +258,8 @@ async def handle_existed(
                 )
             if checksum != "" and checksum.strip() != file_sha1:
                 logger.warning('Error: checksum check failed. The file %s is '
-                              'different from the one in S3 bucket %s. Product: %s',
-                              path_key, bucket_name, product)
+                               'different from the one in S3 bucket %s. Product: %s',
+                               path_key, bucket_name, product)
                 failed_paths.append(file_path)
                 return False
             (prods, no_error) = await self.__run_async(
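For context on the checksum hunk: the file_sha1 value the handler compares against is a hex digest of the local file. A self-contained sketch of computing one (the real charon helper may compute it differently):

    import hashlib

    def file_sha1(path: str) -> str:
        # Stream the file in chunks so large artifacts are not loaded into memory.
        digest = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                digest.update(chunk)
        return digest.hexdigest()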
@@ -293,7 +293,7 @@ async def __copy_between_bucket(
     ) -> bool:
         logger.info(
             "Copying file %s from bucket %s to target %s as %s",
-            source_key, source, target, target_key)
+            source_key, source, target.name, target_key)
         copy_source = {
             'Bucket': source,
             'Key': source_key
@@ -319,7 +319,7 @@ def upload_metadatas(
         self, meta_file_paths: List[str],
         target: Tuple[str, str],
         product: Optional[str] = None, root="/"
-    ) -> Tuple[List[str], List[str]]:
+    ) -> List[str]:
         """ Upload a list of metadata files to s3 bucket. This function is very similar to
             upload_files, except:
             * The metadata files will always be overwritten for each uploading
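Per the docstring, the behavioral contrast with upload_files is overwrite-vs-skip. A toy sketch of that policy difference (schematic names only, not charon's actual internals):

    # Schematic policy difference, not charon's real code path:
    def should_write(key_exists: bool, is_metadata: bool) -> bool:
        # upload_files skips keys that already exist; upload_metadatas always
        # overwrites, because metadata must reflect the latest repository state.
        return True if is_metadata else not key_exists

    assert should_write(key_exists=True, is_metadata=True) is True
    assert should_write(key_exists=True, is_metadata=False) is False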
@@ -328,7 +328,6 @@ def upload_metadatas(
         """
         bucket_name = target[0]
         bucket = self.__get_bucket(bucket_name)
-        uploaded_files = []

         async def path_upload_handler(
             full_file_path: str, path: str, index: int,
@@ -404,7 +403,6 @@ async def path_upload_handler(
                     failed.append(full_file_path)
                     return
                 logger.debug('Updated metadata %s to bucket %s', path, bucket_name)
-                uploaded_files.append(path_key)
             except (ClientError, HTTPClientError) as e:
                 logger.error(
                     "ERROR: file %s not uploaded to bucket"
@@ -413,11 +411,11 @@ async def path_upload_handler(
                 )
                 failed.append(full_file_path)

-        return (uploaded_files, self.__do_path_cut_and(
+        return self.__do_path_cut_and(
             file_paths=meta_file_paths,
             path_handler=self.__path_handler_count_wrapper(path_upload_handler),
             root=root
-        ))
+        )

     def upload_manifest(
         self, manifest_name: str, manifest_full_path: str, target: str,
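This is the signature change that every caller hunk in maven.py and npm.py adapts to: the uploaded-files list is gone, and only the failed list comes back. A before/after sketch, with a stub standing in for the real S3-backed method:

    from typing import List

    def upload_metadatas_stub(meta_file_paths: List[str]) -> List[str]:
        # Stand-in for S3Client.upload_metadatas after this commit:
        # it returns only the paths that failed to upload.
        return [p for p in meta_file_paths if p.endswith(".bad")]

    # Before: (_, _failed_metas) = s3_client.upload_metadatas(...)
    # After, as in the diffs above:
    _failed_metas = upload_metadatas_stub(["maven-metadata.xml", "broken.bad"])
    if len(_failed_metas) > 0:
        print("failed metas:", _failed_metas)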
@@ -441,7 +439,7 @@ def upload_manifest(
     def delete_files(
         self, file_paths: List[str], target: Tuple[str, str],
         product: Optional[str], root="/"
-    ) -> Tuple[List[str], List[str]]:
+    ) -> List[str]:
         """ Deletes a list of files to s3 bucket. * Use the cut down file path as s3 key. The cut
         down way is move root from the file path if it starts with root. Example: if file_path is
         /tmp/maven-repo/org/apache/.... and root is /tmp/maven-repo Then the key will be
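The docstring's cut-down rule translates directly into code. cut_path below is a hypothetical stand-in for the private __do_path_cut_and helper, shown only to make the rule concrete:

    def cut_path(file_path: str, root: str = "/") -> str:
        # Documented rule: if file_path starts with root, strip root
        # (plus the path separator) to form the s3 key.
        root_ = root if root.endswith("/") else root + "/"
        return file_path[len(root_):] if file_path.startswith(root_) else file_path

    # Example taken straight from the docstring:
    assert cut_path("/tmp/maven-repo/org/apache/a.pom", "/tmp/maven-repo") == "org/apache/a.pom"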
@@ -455,8 +453,6 @@ def delete_files(
         bucket_name = target[0]
         bucket = self.__get_bucket(bucket_name)

-        deleted_files = []
-
         async def path_delete_handler(
             full_file_path: str, path: str, index: int,
             total: int, failed: List[str]
@@ -527,7 +523,6 @@ async def path_delete_handler(
                     failed.append(full_file_path)
                     return
                 logger.info("Deleted %s from bucket %s", path, bucket_name)
-                deleted_files.append(path)
                 return
             except (ClientError, HTTPClientError) as e:
                 logger.error(
@@ -550,7 +545,7 @@ async def path_delete_handler(
             root=root
         )

-        return (deleted_files, failed_files)
+        return failed_files

     def delete_manifest(self, product_key: str, target: str, manifest_bucket_name: str):
         if not manifest_bucket_name:
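Taken together, the storage changes reduce every call site to a single failed-list check. A condensed sketch of the resulting caller pattern (with a stub in place of the real client, and hypothetical bucket/prefix values):

    from typing import List, Tuple

    def delete_files_stub(file_paths: List[str], target: Tuple[str, str]) -> List[str]:
        # Stand-in for S3Client.delete_files after this commit.
        return []  # pretend every deletion succeeded

    failed_files = delete_files_stub(["org/apache/a.pom"], ("prod-maven-ga", "ga"))
    succeeded = not failed_files  # truthiness check replaces tuple unpacking
    print("all deleted" if succeeded else "failed: %s" % failed_files)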
