Skip to content

Commit b35a0f6

Browse files
committed
- fix bug: move the "unsaved annotation" warning out of the worker thread into seg_video_start(), and wrap the video-propagation loop in try/except so failures are reported through the tag signal instead of crashing the thread
1 parent 49fe865 commit b35a0f6

File tree

1 file changed

+88
-82
lines changed

1 file changed

+88
-82
lines changed

ISAT/widgets/mainwindow.py

Lines changed: 88 additions & 82 deletions
Original file line number | Diff line number | Diff line change
@@ -158,9 +158,6 @@ def run(self):
158158
if self.mainwindow.segany_video.inference_state == {}:
159159
self.mainwindow.segany_video.init_state(self.mainwindow.image_root, self.mainwindow.files_list)
160160
self.mainwindow.segany_video.reset_state()
161-
if not self.mainwindow.saved:
162-
QtWidgets.QMessageBox.warning(self.mainwindow, 'Warning', 'Current annotation has not been saved!')
163-
return
164161

165162
current_file = self.mainwindow.files_list[self.start_frame_idx]
166163
current_file_path = os.path.join(self.mainwindow.image_root, current_file)
@@ -199,86 +196,90 @@ def run(self):
199196
if len(group_object_dict) < 1:
200197
self.tag.emit(0, self.max_frame_num_to_track, True, True, 'Please label objects before video segment.')
201198
return
199+
try:
200+
for group, object_dict in group_object_dict.items():
201+
mask = object_dict['mask']
202+
self.mainwindow.segany_video.add_new_mask(self.start_frame_idx, group, mask)
203+
204+
for index, (out_frame_idxs, out_obj_ids, out_mask_logits) in enumerate(self.mainwindow.segany_video.predictor.propagate_in_video(
205+
self.mainwindow.segany_video.inference_state,
206+
start_frame_idx=self.start_frame_idx,
207+
max_frame_num_to_track=self.max_frame_num_to_track,
208+
reverse=False,
209+
)):
210+
if index == 0: # 忽略当前图片
211+
continue
212+
file = self.mainwindow.files_list[out_frame_idxs]
213+
file_path = os.path.join(self.mainwindow.image_root, file)
214+
label_path = os.path.join(self.mainwindow.label_root, '.'.join(file.split('.')[:-1]) + '.json')
215+
annotation = Annotation(file_path, label_path)
216+
217+
objects = []
218+
for index_mask, out_obj_id in enumerate(out_obj_ids):
219+
220+
masks = out_mask_logits[index_mask] # [1, h, w]
221+
masks = masks > 0
222+
masks = masks.cpu().numpy()
223+
224+
# mask to polygon
225+
masks = masks.astype('uint8') * 255
226+
h, w = masks.shape[-2:]
227+
masks = masks.reshape(h, w)
228+
229+
if self.mainwindow.scene.contour_mode == CONTOURMode.SAVE_ALL:
230+
# 当保留所有轮廓时,检测所有轮廓,并建立二层等级关系
231+
contours, hierarchy = cv2.findContours(masks, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
232+
else:
233+
# 当只保留外轮廓或单个mask时,只检测外轮廓
234+
contours, hierarchy = cv2.findContours(masks, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
235+
236+
if self.mainwindow.scene.contour_mode == CONTOURMode.SAVE_MAX_ONLY and contours:
237+
largest_contour = max(contours, key=cv2.contourArea) # 只保留面积最大的轮廓
238+
contours = [largest_contour]
239+
240+
for contour in contours:
241+
# polydp
242+
if self.mainwindow.cfg['software']['use_polydp']:
243+
epsilon_factor = 0.001
244+
epsilon = epsilon_factor * cv2.arcLength(contour, True)
245+
contour = cv2.approxPolyDP(contour, epsilon, True)
246+
247+
if len(contour) < 3:
248+
continue
249+
250+
segmentation = []
251+
xmin, ymin, xmax, ymax = annotation.width, annotation.height, 0, 0
252+
for point in contour:
253+
x, y = point[0]
254+
x, y = float(x), float(y)
255+
xmin = min(x, xmin)
256+
ymin = min(x, ymin)
257+
xmax = max(y, xmax)
258+
ymax = max(y, ymax)
259+
260+
segmentation.append((x, y))
261+
262+
area = calculate_area(segmentation)
263+
# bbox = (xmin, ymin, xmax, ymax)
264+
bbox = None
265+
obj = Object(category=group_object_dict[out_obj_id]['category'],
266+
group=out_obj_id,
267+
segmentation=segmentation,
268+
area=area,
269+
layer=group_object_dict[out_obj_id]['layer'],
270+
bbox=bbox,
271+
iscrowd=group_object_dict[out_obj_id]['is_crowd'],
272+
note=group_object_dict[out_obj_id]['note'])
273+
objects.append(obj)
274+
275+
annotation.objects = objects
276+
annotation.save_annotation()
277+
self.tag.emit(index, self.max_frame_num_to_track, False, False, '')
278+
279+
self.tag.emit(index, self.max_frame_num_to_track, True, False, '')
202280

203-
for group, object_dict in group_object_dict.items():
204-
mask = object_dict['mask']
205-
self.mainwindow.segany_video.add_new_mask(self.start_frame_idx, group, mask)
206-
207-
for index, (out_frame_idxs, out_obj_ids, out_mask_logits) in enumerate(self.mainwindow.segany_video.predictor.propagate_in_video(
208-
self.mainwindow.segany_video.inference_state,
209-
start_frame_idx=self.start_frame_idx,
210-
max_frame_num_to_track=self.max_frame_num_to_track,
211-
reverse=False,
212-
)):
213-
if index == 0: # 忽略当前图片
214-
continue
215-
file = self.mainwindow.files_list[out_frame_idxs]
216-
file_path = os.path.join(self.mainwindow.image_root, file)
217-
label_path = os.path.join(self.mainwindow.label_root, '.'.join(file.split('.')[:-1]) + '.json')
218-
annotation = Annotation(file_path, label_path)
219-
220-
objects = []
221-
for index_mask, out_obj_id in enumerate(out_obj_ids):
222-
223-
masks = out_mask_logits[index_mask] # [1, h, w]
224-
masks = masks > 0
225-
masks = masks.cpu().numpy()
226-
227-
# mask to polygon
228-
masks = masks.astype('uint8') * 255
229-
h, w = masks.shape[-2:]
230-
masks = masks.reshape(h, w)
231-
232-
if self.mainwindow.scene.contour_mode == CONTOURMode.SAVE_ALL:
233-
# 当保留所有轮廓时,检测所有轮廓,并建立二层等级关系
234-
contours, hierarchy = cv2.findContours(masks, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
235-
else:
236-
# 当只保留外轮廓或单个mask时,只检测外轮廓
237-
contours, hierarchy = cv2.findContours(masks, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
238-
239-
if self.mainwindow.scene.contour_mode == CONTOURMode.SAVE_MAX_ONLY and contours:
240-
largest_contour = max(contours, key=cv2.contourArea) # 只保留面积最大的轮廓
241-
contours = [largest_contour]
242-
243-
for contour in contours:
244-
# polydp
245-
if self.mainwindow.cfg['software']['use_polydp']:
246-
epsilon_factor = 0.001
247-
epsilon = epsilon_factor * cv2.arcLength(contour, True)
248-
contour = cv2.approxPolyDP(contour, epsilon, True)
249-
250-
if len(contour) < 3:
251-
continue
252-
253-
segmentation = []
254-
xmin, ymin, xmax, ymax = annotation.width, annotation.height, 0, 0
255-
for point in contour:
256-
x, y = point[0]
257-
x, y = float(x), float(y)
258-
xmin = min(x, xmin)
259-
ymin = min(x, ymin)
260-
xmax = max(y, xmax)
261-
ymax = max(y, ymax)
262-
263-
segmentation.append((x, y))
264-
265-
area = calculate_area(segmentation)
266-
# bbox = (xmin, ymin, xmax, ymax)
267-
bbox = None
268-
obj = Object(category=group_object_dict[out_obj_id]['category'],
269-
group=out_obj_id,
270-
segmentation=segmentation,
271-
area=area,
272-
layer=group_object_dict[out_obj_id]['layer'],
273-
bbox=bbox,
274-
iscrowd=group_object_dict[out_obj_id]['is_crowd'],
275-
note=group_object_dict[out_obj_id]['note'])
276-
objects.append(obj)
277-
278-
annotation.objects = objects
279-
annotation.save_annotation()
280-
self.tag.emit(index, self.max_frame_num_to_track, False, False, '')
281-
self.tag.emit(index, self.max_frame_num_to_track, True, False, '')
281+
except Exception as e:
282+
self.tag.emit(index, self.max_frame_num_to_track, True, True, '{}'.format(e))
282283

283284

284285
class InitSegAnyThread(QThread):
@@ -524,6 +525,11 @@ def SeganyEnabled(self):
524525
def seg_video_start(self, max_frame_num_to_track=None):
525526
if self.current_index == None:
526527
return
528+
529+
if not self.saved:
530+
QtWidgets.QMessageBox.warning(self, 'Warning', 'Current annotation has not been saved!')
531+
return
532+
527533
self.setEnabled(False)
528534
self.segany_video_thread.start_frame_idx = self.current_index
529535
self.segany_video_thread.max_frame_num_to_track=max_frame_num_to_track

0 commit comments

Comments (0)