Skip to content

Commit 7097402

Browse files
AyloSrd and Luca Marongiu authored
fix(SUP-1444): bug in annotation export json file (#1658)
Co-authored-by: Luca Marongiu <[email protected]>
1 parent 32a8d4d commit 7097402

File tree

7 files changed

+636
-17
lines changed

7 files changed

+636
-17
lines changed

src/kili/adapters/kili_api_gateway/label/annotation_to_json_response.py

Lines changed: 80 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,13 @@ def _classic_annotations_to_json_response(
209209
def _key_annotations_iterator(
210210
annotation: VideoTranscriptionAnnotation,
211211
) -> Generator[
212-
Tuple[VideoTranscriptionKeyAnnotation, int, int, Optional[VideoTranscriptionKeyAnnotation]],
212+
Tuple[
213+
VideoTranscriptionKeyAnnotation,
214+
int,
215+
int,
216+
Optional[VideoTranscriptionKeyAnnotation],
217+
Optional[int],
218+
],
213219
None,
214220
None,
215221
]:
@@ -220,7 +226,13 @@ def _key_annotations_iterator(
220226
def _key_annotations_iterator(
221227
annotation: VideoClassificationAnnotation,
222228
) -> Generator[
223-
Tuple[VideoClassificationKeyAnnotation, int, int, Optional[VideoClassificationKeyAnnotation]],
229+
Tuple[
230+
VideoClassificationKeyAnnotation,
231+
int,
232+
int,
233+
Optional[VideoClassificationKeyAnnotation],
234+
Optional[int],
235+
],
224236
None,
225237
None,
226238
]:
@@ -231,7 +243,13 @@ def _key_annotations_iterator(
231243
def _key_annotations_iterator(
232244
annotation: VideoObjectDetectionAnnotation,
233245
) -> Generator[
234-
Tuple[VideoObjectDetectionKeyAnnotation, int, int, Optional[VideoObjectDetectionKeyAnnotation]],
246+
Tuple[
247+
VideoObjectDetectionKeyAnnotation,
248+
int,
249+
int,
250+
Optional[VideoObjectDetectionKeyAnnotation],
251+
Optional[int],
252+
],
235253
None,
236254
None,
237255
]:
@@ -246,19 +264,38 @@ def _key_annotations_iterator(annotation: VideoAnnotation) -> Generator:
246264
sorted_key_annotations = sorted(
247265
annotation["keyAnnotations"], key=lambda key_ann: int(key_ann["frame"])
248266
)
249-
267+
# previous_key_ann is used to keep track of the previous key annotation
268+
# in case where keyframe is not present in the current frame range
269+
previous_key_ann = {}
270+
previous_key_ann_index = 0
250271
# iterate over the frame ranges of the annotation
251272
for frame_interval in annotation["frames"]:
252273
frame_range = range(frame_interval["start"], frame_interval["end"] + 1)
274+
# has_key_annotation is used to keep track of whether the current frame range
275+
# has a key annotation or not. It could be that the current frame range does not
276+
# have a key annotation, or that the first keyframe is after the start of the frame range
277+
has_key_annotation = False
253278
for key_ann_index, key_ann in enumerate(sorted_key_annotations):
254279
# skip the key annotation if the key annotation start frame
255280
# is not in current frame range
256281
if key_ann["frame"] not in frame_range:
257282
continue
258283

284+
if key_ann["frame"] > frame_interval["start"] and not has_key_annotation:
285+
# if the key annotation start frame is after the start of the frame range,
286+
# then we need to yield the previous key annotation
287+
key_ann_frame = previous_key_ann["frame"]
288+
yield (
289+
previous_key_ann,
290+
frame_interval["start"],
291+
key_ann["frame"],
292+
key_ann,
293+
key_ann_frame,
294+
)
259295
# compute the key annotation frame range
260296
# the start frame of key annotation is given, but not the end frame
261297
key_ann_start = key_ann["frame"]
298+
key_ann_frame = key_ann["frame"]
262299
key_ann_end = min(
263300
frame_interval["end"] + 1,
264301
sorted_key_annotations[key_ann_index + 1]["frame"]
@@ -273,7 +310,26 @@ def _key_annotations_iterator(annotation: VideoAnnotation) -> Generator:
273310
else None
274311
)
275312

276-
yield key_ann, key_ann_start, key_ann_end, next_key_ann
313+
has_key_annotation = True
314+
previous_key_ann = key_ann
315+
previous_key_ann_index = key_ann_index
316+
317+
yield key_ann, key_ann_start, key_ann_end, next_key_ann, key_ann_frame
318+
319+
if not has_key_annotation:
320+
key_ann_frame = previous_key_ann["frame"]
321+
next_key_ann = (
322+
sorted_key_annotations[previous_key_ann_index + 1]
323+
if previous_key_ann_index + 1 < len(sorted_key_annotations)
324+
else None
325+
)
326+
yield (
327+
previous_key_ann,
328+
frame_interval["start"],
329+
frame_interval["end"] + 1,
330+
next_key_ann,
331+
key_ann_frame,
332+
)
277333

278334

279335
def _ranking_annotation_to_json_response(
@@ -319,10 +375,12 @@ def _video_transcription_annotation_to_json_response(
319375
"""
320376
json_resp: Dict[str, Dict[JobName, Dict]] = defaultdict(dict)
321377

322-
for key_ann, key_ann_start, key_ann_end, _ in _key_annotations_iterator(annotation):
378+
for key_ann, key_ann_start, key_ann_end, _, key_ann_frame in _key_annotations_iterator(
379+
annotation
380+
):
323381
for frame_id in range(key_ann_start, key_ann_end):
324382
json_resp[str(frame_id)][annotation["job"]] = {
325-
"isKeyFrame": frame_id == key_ann_start,
383+
"isKeyFrame": frame_id == key_ann_frame,
326384
"text": key_ann["annotationValue"]["text"],
327385
}
328386

@@ -406,12 +464,14 @@ def _video_classification_annotation_to_json_response(
406464

407465
json_resp: Dict[str, Dict[JobName, Dict]] = defaultdict(dict)
408466

409-
for key_ann, key_ann_start, key_ann_end, _ in _key_annotations_iterator(annotation):
467+
for key_ann, key_ann_start, key_ann_end, _, key_ann_frame in _key_annotations_iterator(
468+
annotation
469+
):
410470
for frame_id in range(key_ann_start, key_ann_end):
411471
# initialize the frame json response
412472
json_resp[str(frame_id)][annotation["job"]] = {
413473
"categories": [],
414-
"isKeyFrame": frame_id == key_ann_start,
474+
"isKeyFrame": frame_id == key_ann_frame,
415475
}
416476

417477
# get the frame json response of child jobs
@@ -461,22 +521,27 @@ def _video_object_detection_annotation_to_json_response(
461521

462522
json_resp = defaultdict(dict)
463523

464-
for key_ann, key_ann_start, key_ann_end, next_key_ann in _key_annotations_iterator(annotation):
524+
for (
525+
key_ann,
526+
key_ann_start,
527+
key_ann_end,
528+
next_key_ann,
529+
key_ann_frame,
530+
) in _key_annotations_iterator(annotation):
465531
for frame_id in range(key_ann_start, key_ann_end):
466532
# get the frame json response of child jobs
467533
child_jobs_frame_json_resp = json_resp_child_jobs.get(str(frame_id), {})
468534

469535
annotation_dict = {
470536
"children": child_jobs_frame_json_resp,
471-
"isKeyFrame": frame_id == key_ann_start,
537+
"isKeyFrame": frame_id == key_ann_frame,
472538
"categories": [{"name": annotation["category"]}],
473539
"mid": annotation["mid"],
474540
"type": json_interface["jobs"][annotation["job"]]["tools"][0],
475541
}
476542

477-
if frame_id == key_ann_start or next_key_ann is None:
543+
if frame_id == key_ann_frame or next_key_ann is None:
478544
norm_vertices = key_ann["annotationValue"]["vertices"]
479-
480545
# between two key frame annotations, an object (point, bbox, polygon) is
481546
# interpolated in the UI
482547
else:
@@ -485,9 +550,9 @@ def _video_object_detection_annotation_to_json_response(
485550
norm_vertices = _interpolate_object(
486551
object_type=json_interface["jobs"][annotation["job"]]["tools"][0],
487552
object_initial_state=object_inital_state,
488-
initial_state_frame_index=key_ann_start,
553+
initial_state_frame_index=key_ann["frame"],
489554
object_final_state=object_final_state,
490-
final_state_frame_index=key_ann_end,
555+
final_state_frame_index=next_key_ann["frame"],
491556
at_frame=frame_id,
492557
)
493558

src/kili/core/graphql/graphql_client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,8 @@
1414
from gql.transport.requests import RequestsHTTPTransport
1515
from gql.transport.requests import log as gql_requests_logger
1616
from graphql import DocumentNode, print_schema
17-
from pyrate_limiter import Duration, Limiter, Rate
17+
from pyrate_limiter import Duration, Rate
18+
from pyrate_limiter.limiter import Limiter
1819
from tenacity import (
1920
retry,
2021
retry_all,
Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
"""Mocks for split video export."""

from collections import defaultdict

# Project json interface: one parent object-detection job (JOB_0) with a
# single child classification job (JOB_1).
json_interface = {
    "jobs": {
        "JOB_0": {
            "content": {
                "categories": {
                    "OBJECT_A": {"children": ["JOB_1"], "name": "Train", "color": "#733AFB"}
                },
                "input": "radio",
            },
            "instruction": "Track objects A and B",
            "isChild": False,
            "tools": ["rectangle"],
            "mlTask": "OBJECT_DETECTION",
            "models": {"tracking": {}},
            "isVisible": True,
            "required": 0,
        },
        "JOB_1": {
            "content": {
                "categories": {
                    "IS_THE OBJECT OCCLUDED?": {"children": [], "name": "Is the object occluded?"}
                },
                "input": "checkbox",
            },
            "instruction": "",
            "isChild": True,
            "mlTask": "CLASSIFICATION",
            "models": {},
            "isVisible": True,
            "required": 0,
        },
    }
}

# Normalized bounding-box vertices shared by every frame entry below.
fake_bbox_norm_vertices = [
    {"x": 0.35, "y": 0.54},
    {"x": 0.35, "y": 0.46},
    {"x": 0.40, "y": 0.46},
    {"x": 0.40, "y": 0.54},
]

# One video object-detection annotation whose frame ranges are split
# ([0, 1] and [4, 5]) with a single key annotation at frame 0.
annotations = [
    {
        "__typename": "VideoObjectDetectionAnnotation",
        "id": "55ad2c18-c063-4064-9f46-2e3fa3eed483",
        "job": "JOB_0",
        "path": [],
        "labelId": "cltftiw7n000qslwu4o1d9rj3",
        "frames": [{"start": 0, "end": 1}, {"start": 4, "end": 5}],
        "keyAnnotations": [
            {
                "id": "55ad2c18-c063-4064-9f46-2e3fa3eed483-0",
                "frame": 0,
                "annotationValue": {"vertices": [[fake_bbox_norm_vertices]]},
            }
        ],
        "name": "Train 2",
        "mid": "20240306141046672-1",
        "category": "OBJECT_A",
    }
]


def _job_0_annotation(is_key_frame):
    """Return one JOB_0 frame-level annotation entry for mid 20240306141046672-1."""
    return {
        "children": {},
        "isKeyFrame": is_key_frame,
        "categories": [{"name": "OBJECT_A"}],
        "mid": "20240306141046672-1",
        "type": "rectangle",
        "boundingPoly": [{"normalizedVertices": fake_bbox_norm_vertices}],
    }


# Expected json response: only frame 0 is a key frame; frames outside the
# annotation's frame ranges (2 and 3) are empty.
expected_json_resp = {
    "0": {
        "ANNOTATION_JOB_COUNTER": {"JOB_0": defaultdict(int, {"OBJECT_A": 1})},
        "ANNOTATION_NAMES_JOB": {"20240306141046672-1": "Train 2"},
        "JOB_0": {"annotations": [_job_0_annotation(True)]},
    },
    "1": {"JOB_0": {"annotations": [_job_0_annotation(False)]}},
    "2": {},
    "3": {},
    "4": {"JOB_0": {"annotations": [_job_0_annotation(False)]}},
    "5": {"JOB_0": {"annotations": [_job_0_annotation(False)]}},
}

0 commit comments

Comments
 (0)