Skip to content

Commit 5a84349

Browse files
committed
adding TODO items
The sheer number of collections in some instances makes this way of getting collections incredibly slow. Instead, we should start at the top and then check each level; this should mean fewer collections to traverse.
1 parent f98edda commit 5a84349

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

scripts/migration/migrate.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ def get_clowder_v1_user_collections(headers, user_v1):
150150
response = requests.get(endpoint, headers=headers)
151151
return [col for col in response.json() if col["authorId"] == user_v1["id"]]
152152

153-
153+
# TODO this is too slow, we need to optimize it
154154
def get_clowder_v1_dataset_collections(headers, user_v1, dataset_id):
155155
matching_collections = []
156156
endpoint = f"{CLOWDER_V1}/api/collections/allCollections"
@@ -687,8 +687,10 @@ def build_collection_metadata_for_v1_dataset(dataset_id, user_v1, headers):
687687
return dataset_collections
688688

689689

690+
# TODO test this method
690691
def build_collection_space_metadata_for_v1_dataset(dataset, user_v1, headers):
691692
dataset_id = dataset["id"]
693+
# TODO this is too slow; we need a way to sort through the collection hierarchy better
692694
dataset_collections = get_clowder_v1_dataset_collections(
693695
headers=headers, user_v1=user_v1, dataset_id=dataset_id
694696
)

0 commit comments

Comments
 (0)