|
| 1 | +#!/usr/bin/env python |
| 2 | + |
| 3 | +"""Example extractor based on the clowder code.""" |
| 4 | + |
import logging
import os
import subprocess

import pyclowder.datasets
import pyclowder.files
from pyclowder.extractors import Extractor
| 11 | + |
| 12 | + |
class TestDatasetExtractor(Extractor):
    """Smoke-test dataset-level pyclowder API calls.

    Uploads a local file to the target dataset, then exercises the
    file-list, dataset-info, and dataset-metadata endpoints, logging
    success or failure for each step.
    """

    def __init__(self):
        Extractor.__init__(self)

        # add any additional arguments to parser
        # self.parser.add_argument('--max', '-m', type=int, nargs='?', default=-1,
        #                          help='maximum number (default=-1)')

        # parse command line and load default logging configuration
        self.setup()

        # setup logging for the extractor
        logging.getLogger('pyclowder').setLevel(logging.DEBUG)
        logging.getLogger('__main__').setLevel(logging.DEBUG)

    def process_message(self, connector, host, secret_key, resource, parameters):
        """Upload a test file, then verify file list, dataset info and metadata.

        Parameters
        ----------
        connector : pyclowder connector used for the API calls
        host : Clowder host URL
        secret_key : Clowder API key
        resource : message resource; ``resource['id']`` is the dataset id
        parameters : extractor parameters (unused here)
        """
        logger = logging.getLogger(__name__)
        dataset_id = resource['id']

        # Local file path to file which you want to upload to dataset
        file_path = os.path.join(os.getcwd(), 'test_dataset_extractor_file.txt')

        # Upload a new file to dataset (final True is the check_duplicate
        # flag passed positionally in the original call — preserved as-is)
        file_id = pyclowder.files.upload_to_dataset(connector, host, secret_key, dataset_id, file_path, True)
        if file_id is None:
            logger.error("Error uploading file")
        else:
            logger.info("File uploaded successfully")

        # Get file list under dataset and confirm the uploaded file is present.
        # any() over a generator avoids materializing an intermediate list
        # (and avoids shadowing the builtin-like name `file`).
        file_list = pyclowder.datasets.get_file_list(connector, host, secret_key, dataset_id)
        logger.info("File list : %s", file_list)
        if any(entry['id'] == file_id for entry in file_list):
            logger.info("File uploading and retrieving file list succeeded")
        else:
            logger.error("File uploading/retrieving file list didn't succeed")

        # Download info of dataset and check it round-trips the dataset id
        dataset_info = pyclowder.datasets.get_info(connector, host, secret_key, dataset_id)
        logger.info("Dataset info: %s", dataset_info)
        if dataset_id == dataset_info['id']:
            logger.info("Success in downloading dataset info")
        else:
            logger.error("Error in downloading dataset info")

        # Downloading metadata of dataset (None means no metadata attached)
        dataset_metadata = pyclowder.datasets.download_metadata(connector, host, secret_key, dataset_id)
        if dataset_metadata is None:
            logger.info("No metadata found for dataset %s", dataset_id)
        else:
            logger.info("Metadata: %s", dataset_metadata)
| 67 | + |
| 68 | + |
if __name__ == "__main__":
    # Instantiate the test extractor and hand control to its message loop.
    TestDatasetExtractor().start()
0 commit comments