@@ -321,7 +321,7 @@ def create_documents(
321321 return dict (counter )
322322
323323
324- def get_documents (las_client : Client , dataset_id , output_dir , num_threads , chunk_size , max_elements ):
324+ def get_documents (las_client : Client , dataset_id , output_dir , num_threads , chunk_size , max_results ):
325325 already_downloaded = set ()
326326 if output_dir .exists ():
327327 for path in output_dir .iterdir ():
@@ -337,7 +337,7 @@ def get_documents(las_client: Client, dataset_id, output_dir, num_threads, chunk
337337 already_downloaded_from_dataset .add (document ['documentId' ])
338338 else :
339339 documents .append (document )
340- if max_elements and max_elements <= len (documents ):
340+ if max_results and max_results <= len (documents ):
341341 break
342342 print (f'Found { len (already_downloaded_from_dataset )} documents already downloaded' )
343343
@@ -470,7 +470,7 @@ def create_datasets_parser(subparsers):
470470 get_documents_parser .add_argument ('output_dir' , type = Path , help = 'Path to download directory' )
471471 get_documents_parser .add_argument ('--num-threads' , default = 32 , type = int , help = 'Number of threads to use' )
472472 get_documents_parser .add_argument ('--chunk-size' , default = 100 , type = int )
473- get_documents_parser .add_argument ('--max-elements' , default = 0 , type = int )
473+ get_documents_parser .add_argument ('--max-results' , default = 0 , type = int )
474474 get_documents_parser .set_defaults (cmd = get_documents )
475475
476476 create_transformation_parser = subparsers .add_parser ('create-transformation' )
@@ -483,7 +483,7 @@ def create_datasets_parser(subparsers):
483483 "options": {} (optional)
484484 },
485485 ...
486- ]
486+ ]
487487 Examples:
488488 [{"type": "remove-duplicates", "options": {}}]
489489 ''' ))