 import requests
 import responses
 from responses.matchers import json_params_matcher
+from unittest.mock import patch

 from roboflow import API_URL
 from roboflow.adapters.rfapi import AnnotationSaveError, ImageUploadError
 from roboflow.config import DEFAULT_BATCH_NAME
-from tests import PROJECT_NAME, ROBOFLOW_API_KEY, WORKSPACE_NAME, RoboflowTest
+from tests import PROJECT_NAME, ROBOFLOW_API_KEY, WORKSPACE_NAME, RoboflowTest, ordered


 class TestProject(RoboflowTest):
+    def _create_test_dataset(self, images=None):
+        """
+        Create a test dataset with the specified images or a default image.
+
+        Args:
+            images: List of image dictionaries. If None, a default image is used.
+
+        Returns:
+            Dictionary representing a parsed dataset.
+        """
+        if images is None:
+            images = [
+                {
+                    "file": "image1.jpg",
+                    "split": "train",
+                    "annotationfile": {
+                        "file": "image1.xml"
+                    }
+                }
+            ]
+
+        return {
+            "location": "/test/location/",
+            "images": images
+        }
+
+    def _setup_upload_dataset_mocks(self, test_dataset=None, image_return=None, annotation_return=None,
+                                    project_created=False, save_annotation_side_effect=None,
+                                    upload_image_side_effect=None):
+        """
+        Set up the patchers shared by the upload_dataset tests.
+
+        Args:
+            test_dataset: Dataset returned by parsefolder. If None, a default dataset is created.
+            image_return: Return value for upload_image. Defaults to a successful upload.
+            annotation_return: Return value for save_annotation. Defaults to a successful annotation.
+            project_created: Whether to simulate a newly created project.
+            save_annotation_side_effect: Side effect for save_annotation.
+            upload_image_side_effect: Side effect for upload_image.
+
+        Returns:
+            Dictionary of unstarted patcher objects (each has start and stop methods).
+        """
+        if test_dataset is None:
+            test_dataset = self._create_test_dataset()
+
+        if image_return is None:
+            image_return = ({"id": "test-id", "success": True}, 0.1, 0)
+
+        if annotation_return is None:
+            annotation_return = ({"success": True}, 0.1, 0)
+
+        # Patch upload_image and save_annotation with either a side effect or a canned return value
+        if upload_image_side_effect:
+            upload_patch = patch('roboflow.core.workspace.Project.upload_image', side_effect=upload_image_side_effect)
+        else:
+            upload_patch = patch('roboflow.core.workspace.Project.upload_image', return_value=image_return)
+
+        if save_annotation_side_effect:
+            annotation_patch = patch('roboflow.core.workspace.Project.save_annotation',
+                                     side_effect=save_annotation_side_effect)
+        else:
+            annotation_patch = patch('roboflow.core.workspace.Project.save_annotation', return_value=annotation_return)
+
+        return {
+            'parser': patch('roboflow.core.workspace.folderparser.parsefolder', return_value=test_dataset),
+            'upload': upload_patch,
+            'save_annotation': annotation_patch,
+            'get_project': patch('roboflow.core.workspace.Workspace._get_or_create_project',
+                                 return_value=(self.project, project_created))
+        }
+
     def test_check_valid_image_with_accepted_formats(self):
         images_to_test = [
             "rabbit.JPG",
@@ -224,3 +292,186 @@ def test_create_annotation_job_error(self):
             )

         self.assertEqual(str(context.exception), "Batch not found")
+
+    @ordered
+    @responses.activate
+    def test_project_upload_dataset(self):
+        """Test upload_dataset functionality with various scenarios"""
+        test_scenarios = [
+            {
+                "name": "string_annotationdesc",
+                "dataset": [{
+                    "file": "test_image.jpg",
+                    "split": "train",
+                    "annotationfile": "string_annotation.txt"
+                }],
+                "params": {"num_workers": 1},
+                "assertions": {}
+            },
+            {
+                "name": "success_basic",
+                "dataset": [
+                    {"file": "image1.jpg", "split": "train", "annotationfile": {"file": "image1.xml"}},
+                    {"file": "image2.jpg", "split": "valid", "annotationfile": {"file": "image2.xml"}}
+                ],
+                "params": {},
+                "assertions": {
+                    "parser": [("/test/dataset",)],
+                    "upload": {"count": 2},
+                    "save_annotation": {"count": 2}
+                },
+                "image_return": ({"id": "test-id-1", "success": True}, 0.1, 0)
+            },
+            {
+                "name": "custom_parameters",
+                "dataset": None,
+                "params": {
+                    "num_workers": 2,
+                    "project_license": "CC BY 4.0",
+                    "project_type": "classification",
+                    "batch_name": "test-batch",
+                    "num_retries": 3
+                },
+                "assertions": {
+                    "upload": {"count": 1, "kwargs": {"batch_name": "test-batch", "num_retry_uploads": 3}}
+                }
+            },
+            {
+                "name": "project_creation",
+                "dataset": None,
+                "params": {"project_name": "new-project"},
+                "assertions": {},
+                "project_created": True
+            },
+            {
+                "name": "with_labelmap",
+                "dataset": [{
+                    "file": "image1.jpg",
+                    "split": "train",
+                    "annotationfile": {
+                        "file": "image1.xml",
+                        "labelmap": "path/to/labelmap.json"
+                    }
+                }],
+                "params": {},
+                "assertions": {
+                    "save_annotation": {"count": 1},
+                    "load_labelmap": {"count": 1}
+                },
+                "extra_mocks": [
+                    ("load_labelmap", "roboflow.core.workspace.load_labelmap", {"return_value": {"old_label": "new_label"}})
+                ]
+            },
+            {
+                "name": "concurrent_uploads",
+                "dataset": [{"file": f"image{i}.jpg", "split": "train"} for i in range(10)],
+                "params": {"num_workers": 5},
+                "assertions": {
+                    "thread_pool": {"count": 1, "kwargs": {"max_workers": 5}}
+                },
+                "extra_mocks": [
+                    ("thread_pool", "concurrent.futures.ThreadPoolExecutor", {})
+                ]
+            },
+            {
+                "name": "empty_dataset",
+                "dataset": [],
+                "params": {},
+                "assertions": {
+                    "upload": {"count": 0}
+                }
+            },
+            {
+                "name": "raw_text_annotation",
+                "dataset": [{
+                    "file": "image1.jpg",
+                    "split": "train",
+                    "annotationfile": {
+                        "rawText": "annotation content here",
+                        "format": "json"
+                    }
+                }],
+                "params": {},
+                "assertions": {
+                    "save_annotation": {"count": 1}
+                }
+            }
+        ]
+
+        error_cases = [
+            {
+                "name": "image_upload_error",
+                "side_effect": {
+                    "upload_image_side_effect": lambda *args, **kwargs:
+                        (_ for _ in ()).throw(ImageUploadError("Failed to upload image"))
+                },
+                "params": {"num_workers": 1}
+            },
+            {
+                "name": "annotation_upload_error",
+                "side_effect": {
+                    "save_annotation_side_effect": lambda *args, **kwargs:
+                        (_ for _ in ()).throw(AnnotationSaveError("Failed to save annotation"))
+                },
+                "params": {"num_workers": 1}
+            }
+        ]
+
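+        # Run each scenario with its own set of patchers, then verify the expected calls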
+        for scenario in test_scenarios:
+            test_dataset = (
+                self._create_test_dataset(scenario.get("dataset")) if scenario.get("dataset") is not None else None
+            )
+
+            extra_mocks = {}
+            if "extra_mocks" in scenario:
+                for mock_name, target, config in scenario.get("extra_mocks", []):
+                    extra_mocks[mock_name] = patch(target, **config)
+
+            mocks = self._setup_upload_dataset_mocks(
+                test_dataset=test_dataset,
+                image_return=scenario.get("image_return"),
+                project_created=scenario.get("project_created", False)
+            )
+
+            mock_objects = {}
+            for name, mock in mocks.items():
+                mock_objects[name] = mock.start()
+
+            for name, mock in extra_mocks.items():
+                mock_objects[name] = mock.start()
+
+            try:
+                params = {"dataset_path": "/test/dataset", "project_name": PROJECT_NAME}
+                params.update(scenario.get("params", {}))
+
+                self.workspace.upload_dataset(**params)
+
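+                # Compare recorded calls against the scenario's expected args and call counts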
+                for mock_name, assertion in scenario.get("assertions", {}).items():
+                    mock_obj = mock_objects.get(mock_name)
+                    if isinstance(assertion, list):
+                        call_args_list = [args for args, _ in mock_obj.call_args_list]
+                        for expected_args in assertion:
+                            self.assertIn(expected_args, call_args_list)
+                    elif isinstance(assertion, dict):
+                        if "count" in assertion:
+                            self.assertEqual(mock_obj.call_count, assertion["count"])
+                        if "kwargs" in assertion and mock_obj.call_count > 0:
+                            _, kwargs = mock_obj.call_args
+                            for key, value in assertion["kwargs"].items():
+                                self.assertEqual(kwargs.get(key), value)
+            finally:
+                for mock in list(mocks.values()) + list(extra_mocks.values()):
+                    mock.stop()
+
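+        # Error paths: the patched helpers raise, so these cases only require upload_dataset to complete without propagating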
+        for case in error_cases:
+            mocks = self._setup_upload_dataset_mocks(**case.get("side_effect", {}))
+
+            for mock in mocks.values():
+                mock.start()
+
+            try:
+                params = {"dataset_path": "/test/dataset", "project_name": PROJECT_NAME}
+                params.update(case.get("params", {}))
+                self.workspace.upload_dataset(**params)
+            finally:
+                for mock in mocks.values():
+                    mock.stop()