
Commit 4d52fb2

Merge pull request #67 from clowder-framework/50-clowder20-submit-file-to-extractor
Create test extractors that check functionality of v2 endpoints
2 parents 8506f06 + ffc60a7 commit 4d52fb2

File tree

14 files changed: +431 -7 lines changed

pyclowder/api/v1/files.py

Lines changed: 14 additions & 0 deletions

@@ -11,6 +11,7 @@
 import requests
 from requests_toolbelt.multipart.encoder import MultipartEncoder
 
+from pyclowder.client import ClowderClient
 from pyclowder.collections import get_datasets, get_child_collections
 from pyclowder.datasets import get_file_list
 
@@ -95,6 +96,19 @@ def download_info(connector, client, fileid):
 
     return result
 
+def download_summary(connector, host, key, fileid):
+    """Download file summary from Clowder. This is identical to download_info; v2 uses a
+    different name for the same functionality, so the method is kept in v1 for consistency.
+
+    Keyword arguments:
+    connector -- connector information, used to get missing parameters and send status updates
+    host -- the clowder host, including http and port, should end with a /
+    key -- the secret key to login to clowder
+    fileid -- the file to fetch metadata of
+    """
+    client = ClowderClient(host=host, key=key)
+    result = download_info(connector, client, fileid)
+    return result.json()
 
 def download_metadata(connector, client, fileid, extractor=None):
     """Download file JSON-LD metadata from Clowder.

pyclowder/api/v2/files.py

Lines changed: 22 additions & 6 deletions

@@ -94,6 +94,22 @@ def download_info(connector, client, fileid):
 
     return result
 
+def download_summary(connector, client, fileid):
+    """Download file summary from Clowder.
+
+    Keyword arguments:
+    connector -- connector information, used to get missing parameters and send status updates
+    client -- ClowderClient containing authentication credentials
+    fileid -- the file to fetch metadata of
+    """
+    url = '%s/api/v2/files/%s/summary' % (client.host, fileid)
+    headers = {"X-API-KEY": client.key}
+    # fetch data
+    result = connector.get(url, stream=True, verify=connector.ssl_verify if connector else True, headers=headers)
+
+    return result
 
 def download_metadata(connector, client, fileid, extractor=None):
     """Download file JSON-LD metadata from Clowder.

@@ -302,12 +318,12 @@ def upload_to_dataset(connector, client, datasetid, filepath, check_duplicate=Fa
 
     if os.path.exists(filepath):
         filename = os.path.basename(filepath)
-        m = MultipartEncoder(
-            fields={'File': (filename, open(filepath, 'rb'))}
-        )
-        headers = {"X-API-KEY": client.key,
-                   'Content-Type': m.content_type}
-        result = connector.post(url, data=m, headers=headers,
+        # m = MultipartEncoder(
+        #     fields={'File': (filename, open(filepath, 'rb'))}
+        # )
+        file_data = {"file": open(filepath, 'rb')}
+        headers = {"X-API-KEY": client.key}
+        result = connector.post(url, files=file_data, headers=headers,
                                 verify=connector.ssl_verify if connector else True)
 
         uploadedfileid = result.json()['id']
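For reference, the new v2 summary call reduces to a single authenticated GET against `/api/v2/files/{id}/summary`. A minimal sketch with plain `requests`, assuming a local Clowder v2 instance; the host, key, and file id below are placeholders for illustration, not values from this commit:

```
import requests

CLOWDER_V2_HOST = "http://localhost:8000"   # assumed local Clowder v2 instance
API_KEY = "your-api-key-here"               # placeholder, not a real key
FILE_ID = "639b31754241665a4fc3e513"        # example file id reused from the README below

# Equivalent of pyclowder.api.v2.files.download_summary, without the connector layer
url = "%s/api/v2/files/%s/summary" % (CLOWDER_V2_HOST, FILE_ID)
response = requests.get(url, headers={"X-API-KEY": API_KEY})
response.raise_for_status()
print(response.json())
```

The upload hunk, meanwhile, swaps `MultipartEncoder` for requests' built-in `files=` parameter, which constructs the multipart body and its Content-Type boundary automatically.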

pyclowder/files.py

Lines changed: 15 additions & 1 deletion

@@ -69,6 +69,20 @@ def download_info(connector, host, key, fileid):
     return result.json()
 
 
+def download_summary(connector, host, key, fileid):
+    """Download file summary metadata from Clowder.
+
+    Keyword arguments:
+    connector -- connector information, used to get missing parameters and send status updates
+    host -- the clowder host, including http and port, should end with a /
+    key -- the secret key to login to clowder
+    fileid -- the file to fetch metadata of
+    """
+    client = ClowderClient(host=host, key=key)
+    result = files.download_summary(connector, client, fileid)
+    return result.json()
+
+
 def download_metadata(connector, host, key, fileid, extractor=None):
     """Download file JSON-LD metadata from Clowder.

@@ -240,7 +254,7 @@ def upload_to_dataset(connector, host, key, datasetid, filepath, check_duplicate
     """
     client = ClowderClient(host=host, key=key)
     if clowder_version == 2:
-        files.upload_to_dataset(connector, client, datasetid, filepath, check_duplicate)
+        return files.upload_to_dataset(connector, client, datasetid, filepath, check_duplicate)
     else:
         logger = logging.getLogger(__name__)
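The top-level wrapper gives extractor code a single entry point that hides the HTTP layer. A minimal sketch of how the new `pyclowder.files.download_summary` might be called from a file extractor's `process_message` (hypothetical snippet, not part of this commit; the callback signature follows the pyclowder extractor convention, where `resource['id']` is the file being processed):

```
import logging

import pyclowder.files

logger = logging.getLogger(__name__)

def process_message(self, connector, host, secret_key, resource, parameters):
    # Fetch the summary record for the file this message refers to
    summary = pyclowder.files.download_summary(connector, host, secret_key, resource['id'])
    logger.info("File summary: %s", summary)
```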

sample-extractors/test-dataset-extractor/Dockerfile

Lines changed: 8 additions & 0 deletions

FROM python:3.8

WORKDIR /extractor
COPY requirements.txt ./
RUN pip install -r requirements.txt

COPY test-dataset-extractor.py extractor_info.json ./
CMD python test-dataset-extractor.py
sample-extractors/test-dataset-extractor/README.md

Lines changed: 81 additions & 0 deletions

A simple test extractor that verifies the dataset functions in pyclowder.
# Docker

This extractor is ready to be run as a Docker container; the only dependency is a running Clowder instance. Simply build and run.

1. Start Clowder V2. For help starting Clowder V2, see our [getting started guide](https://github.com/clowder-framework/clowder2/blob/main/README.md).

2. Build the extractor Docker container:

```
# from this directory, run:
docker build -t test-dataset-extractor .
```

3. Run the extractor:

```
docker run -t -i --rm --net clowder_clowder -e "RABBITMQ_URI=amqp://guest:guest@rabbitmq:5672/%2f" --name "test-dataset-extractor" test-dataset-extractor
```

Then open the Clowder web app and run the test-dataset-extractor on a .txt file (or similar)! Done.
### Python and Docker details

You may use any version of Python 3. Simply edit the first line of the `Dockerfile`; by default it uses `FROM python:3.8`.

Docker flags:

- `--net` links the extractor to the Clowder Docker network (run `docker network ls` to identify your own).
- `-e "RABBITMQ_URI=..."` sets the environment variable that controls which RabbitMQ server and exchange the extractor binds to. Setting `RABBITMQ_EXCHANGE` may also help.
- You can also use `--link` to link the extractor to a RabbitMQ container.
- `--name` assigns the container a name visible in Docker Desktop.

## Troubleshooting

**If you run into _any_ trouble**, please reach out on our Clowder Slack in the [#pyclowder channel](https://clowder-software.slack.com/archives/CNC2UVBCP).

Alternate methods of running extractors are below.

# Commandline Execution

To execute the extractor from the command line you will need the required packages installed. It is highly recommended to use a Python virtual environment for this: create the environment, activate it, and install the required packages. A Python sketch of Step 5 appears after the block below.

```
# Step 1 - Start the Clowder docker-compose stack

# Step 2 - Start the heartbeat listener
virtualenv clowder2-python (try pipenv)
source clowder2-python/bin/activate

# Step 3 - Run heartbeat_listener_sync.py to register the new extractor
# (this step will likely not be needed in the future)
cd ~/Git/clowder2/backend
pip install email_validator
copy heartbeat_listener_sync.py to /backend from /backend/app/rabbitmq
python heartbeat_listener_sync.py

# Step 4 - Install the pyclowder branch & run the extractor
source ~/clowder2-python/bin/activate
pip uninstall pyclowder

# the pyclowder Git repo should have Todd's branch checked out (50-clowder20-submit-file-to-extractor)
pip install -e ~/Git/pyclowder

cd ~/Git/pyclowder/sample-extractors/test-dataset-extractor
export CLOWDER_VERSION=2
export CLOWDER_URL=http://localhost:8000/

python test-dataset-extractor.py

# Step 5 - Post a particular file ID (text file) to the new extractor
POST http://localhost:3002/api/v2/files/639b31754241665a4fc3e513/extract?extractorName=ncsa.test-dataset-extractor

# Or, go to the Clowder UI and submit a file for extraction
```
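Step 5 can also be scripted. A minimal sketch in Python, assuming the backend from the compose setup above listens on port 3002 and that this endpoint accepts the same `X-API-KEY` header as the other v2 endpoints (the key value is a placeholder, not part of this commit):

```
import requests

API_KEY = "your-api-key-here"            # placeholder; use a valid Clowder v2 key
FILE_ID = "639b31754241665a4fc3e513"     # the text file to submit, as in Step 5

url = "http://localhost:3002/api/v2/files/%s/extract" % FILE_ID
response = requests.post(url,
                         params={"extractorName": "ncsa.test-dataset-extractor"},
                         headers={"X-API-KEY": API_KEY})
response.raise_for_status()
print(response.json())
```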

# Run the extractor from PyCharm

You can run heartbeat_listener_sync.py and test-dataset-extractor.py from PyCharm.
Create a pipenv (generally PyCharm directs you to create one when you first open the file). To run test-dataset-extractor.py,
add `CLOWDER_VERSION=2` to the environment variables in the run configuration.
sample-extractors/test-dataset-extractor/extractor_info.json

Lines changed: 29 additions & 0 deletions

{
    "@context": "http://clowder.ncsa.illinois.edu/contexts/extractors.jsonld",
    "name": "ncsa.test-dataset-extractor",
    "version": "2.0",
    "description": "Test Dataset extractor. Test to verify all functionalities of dataset in pyclowder.",
    "author": "Dipannita Dey <[email protected]>",
    "contributors": [],
    "contexts": [
        {
            "lines": "http://clowder.ncsa.illinois.edu/metadata/sample_metadata#lines",
            "words": "http://clowder.ncsa.illinois.edu/metadata/sample_metadata#words",
            "characters": "http://clowder.ncsa.illinois.edu/metadata/sample_metadata#characters"
        }
    ],
    "repository": [
        {
            "repType": "git",
            "repUrl": "https://opensource.ncsa.illinois.edu/stash/scm/cats/pyclowder.git"
        }
    ],
    "process": {
        "dataset": [
            "*"
        ]
    },
    "external_services": [],
    "dependencies": [],
    "bibtex": []
}
sample-extractors/test-dataset-extractor/requirements.txt

Lines changed: 1 addition & 0 deletions

pyclowder==3.0.2
sample-extractors/test-dataset-extractor/test-dataset-extractor.py

Lines changed: 71 additions & 0 deletions

#!/usr/bin/env python

"""Example extractor based on the clowder code."""

import logging
import os

from pyclowder.extractors import Extractor
import pyclowder.datasets
import pyclowder.files


class TestDatasetExtractor(Extractor):
    """Test the functionalities of an extractor."""
    def __init__(self):
        Extractor.__init__(self)

        # add any additional arguments to parser
        # self.parser.add_argument('--max', '-m', type=int, nargs='?', default=-1,
        #                          help='maximum number (default=-1)')

        # parse command line and load default logging configuration
        self.setup()

        # setup logging for the extractor
        logging.getLogger('pyclowder').setLevel(logging.DEBUG)
        logging.getLogger('__main__').setLevel(logging.DEBUG)

    def process_message(self, connector, host, secret_key, resource, parameters):
        # Process the dataset and verify the pyclowder endpoints
        logger = logging.getLogger(__name__)
        dataset_id = resource['id']

        # Local path of the file to upload to the dataset
        file_path = os.path.join(os.getcwd(), 'test_dataset_extractor_file.txt')

        # Upload a new file to the dataset
        file_id = pyclowder.files.upload_to_dataset(connector, host, secret_key, dataset_id, file_path, True)
        if file_id is None:
            logger.error("Error uploading file")
        else:
            logger.info("File uploaded successfully")

        # Get the file list of the dataset and check that the new file is in it
        file_list = pyclowder.datasets.get_file_list(connector, host, secret_key, dataset_id)
        logger.info("File list: %s", file_list)
        if file_id in list(map(lambda file: file['id'], file_list)):
            logger.info("File uploading and retrieving file list succeeded")
        else:
            logger.error("File uploading/retrieving file list didn't succeed")

        # Download the dataset info and verify its id
        dataset_info = pyclowder.datasets.get_info(connector, host, secret_key, dataset_id)
        logger.info("Dataset info: %s", dataset_info)
        if dataset_id == dataset_info['id']:
            logger.info("Success in downloading dataset info")
        else:
            logger.error("Error in downloading dataset info")

        # Download the metadata of the dataset
        dataset_metadata = pyclowder.datasets.download_metadata(connector, host, secret_key, dataset_id)
        if dataset_metadata is None:
            logger.info("No metadata found for dataset %s", dataset_id)
        else:
            logger.info("Metadata: %s", dataset_metadata)


if __name__ == "__main__":
    extractor = TestDatasetExtractor()
    extractor.start()
sample-extractors/test-dataset-extractor/test_dataset_extractor_file.txt

Lines changed: 1 addition & 0 deletions

This is a test file for the test dataset extractor.
sample-extractors/test-file-extractor/Dockerfile

Lines changed: 8 additions & 0 deletions

FROM python:3.8

WORKDIR /extractor
COPY requirements.txt ./
RUN pip install -r requirements.txt

COPY test-file-extractor.py extractor_info.json ./
CMD python test-file-extractor.py
