-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathLeafSegmentorInfer.py
More file actions
300 lines (247 loc) · 10.3 KB
/
LeafSegmentorInfer.py
File metadata and controls
300 lines (247 loc) · 10.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
from LeafSegmentorUtils import prompt_model
from pycocotools import mask as mask_tools
from Config import LeafSegmentorConfig
import matplotlib.pyplot as plt
from mrcnn import visualize
from skimage import measure
from matplotlib import cm
from pydoc import locate
from tqdm import tqdm
from PIL import Image
import numpy as np
import json
import os
COLOR_MAP = "Blues"
CONTOUR_FILE_NAME = "contours.json"
def infer(args):
    """
    Run Mask R-CNN inference on one image or a directory of images.

    Depending on the CLI flags, this saves color-coded instance-mask images,
    annotated detection pictures, per-leaf contour text files plus a JSON
    contour summary, and (when a ground-truth adapter is given) ground-truth
    mask images and an average IoU score printed at the end.

    :param args: parsed CLI namespace; reads .path, .output, .no_pictures,
                 .no_contours, .model, .no_masks, .gt and .task
    """
    # Imported lazily so simply importing this module doesn't pull in TF/Keras.
    from mrcnn import model as mrcnn_lib
    infer_path = args.path
    output = args.output
    do_pictures = not args.no_pictures
    do_contours = not args.no_contours
    model_path = args.model
    should_save_masks = not args.no_masks
    gt_adapter = args.gt
    task_id = args.task
    compare_to_gt = gt_adapter is not None

    # Retrieve images
    images = list(generate_images(infer_path))

    # Retrieve gt masks
    if compare_to_gt:
        # Adapter is addressed as "<Name>.<Name>": a class named like its module.
        adapter_class = locate(gt_adapter + '.' + gt_adapter)
        gt_path = infer_path
        gt_annotation_map = generate_annotation_map(adapter_class, gt_path, images, task_id)

    # Retrieve model path (prompts the user if the given path is not usable)
    model_path = prompt_model(model_path)

    # Load model
    inference_config = get_inference_config(LeafSegmentorConfig)
    if not os.path.exists(output):
        os.makedirs(output, exist_ok=True)
    model = mrcnn_lib.MaskRCNN(mode="inference", config=inference_config, model_dir=output)
    model.load_weights(model_path, by_name=True)
    # set_log_dir() derives a fresh run directory; all outputs go there.
    model.set_log_dir()
    output_dir = model.log_dir
    os.makedirs(output_dir, exist_ok=True)

    # Infer
    inference_dict = {}  # image_path -> contour dict (serialized to JSON below)
    IoU_dict = {}        # image_path -> average IoU vs. ground truth
    for image_path in tqdm(images):
        inference_dict[image_path] = []
        image_name = os.path.basename(image_path)
        image = np.array(Image.open(image_path))
        # detect() takes a batch; we run one image at a time and unwrap it.
        r = model.detect([image])[0]
        if should_save_masks:
            save_masks(r['masks'], output_dir, image_name)
        if do_pictures:
            output_file_path = os.path.join(output_dir, image_name)
            _, ax = plt.subplots(1, figsize=(16,16))
            visualize.save_instances(image, r['rois'], r['masks'], r['class_ids'],
                                     ['BG', 'leave'], r['scores'], save_to=output_file_path, ax=ax)
        if do_contours:
            inference_dict[image_path], txt_contours = get_contours(r)
            # One .txt file per polygon: <image>_<leaf idx>_<polygon idx>.txt
            for i, leaf_contour in enumerate(txt_contours):
                for j, polygon_contour in enumerate(leaf_contour):
                    contour_file_name = os.path.join(output_dir, os.path.splitext(image_name)[0]) + \
                        "_" + str(i).zfill(3) + "_" + str(j) + ".txt"
                    np.savetxt(contour_file_name, polygon_contour, fmt='%.1f', delimiter=' , ')
        if compare_to_gt:
            gt_masks = get_all_masks(gt_annotation_map[image_path], image_path)
            gt_image_name = ".".join(image_name.split(".")[:-1]) + "_GT.png"
            save_masks(gt_masks, output_dir, gt_image_name)
            IoU_dict[image_path] = calculate_iou(image_name, r['masks'], gt_masks)
    if do_contours:
        with open(os.path.join(output_dir, CONTOUR_FILE_NAME), 'w') as f:
            f.write(json.dumps(inference_dict, indent=2))
    if compare_to_gt:
        # NOTE(review): raises ZeroDivisionError if no images were processed.
        total_score = sum(IoU_dict.values()) / len(IoU_dict)
        print("average IoU scores: " + str(total_score))
def save_masks(masks, output_dir, image_name):
    """
    Render all instance masks into one color-coded label image and save it.

    :param masks: array of shape (H, W, N), one boolean channel per instance
    :param output_dir: directory where the PNG is written
    :param image_name: source image file name; output is "<stem>_mask.png"
    """
    masks_shape = masks.shape
    label_image = np.zeros(masks_shape[:2], dtype='uint8')
    for i in range(masks_shape[-1]):
        mask = masks[..., i]
        # Assign the label instead of summing it in: the original
        # `image += mask * (i + 1)` produced out-of-range label values
        # wherever instances overlapped (and could overflow uint8).
        # With assignment, overlapping pixels keep the last instance's label,
        # which is always a valid colormap index.
        label_image[mask.astype(bool)] = i + 1
    # N+1 discrete colors: one per instance plus one for the masked background.
    my_cm = cm.get_cmap(COLOR_MAP, masks_shape[-1] + 1)
    my_cm.set_bad(color='black')  # background (label 0) renders as black
    label_image = np.ma.masked_where(label_image == 0, label_image)
    # Integer input indexes the colormap LUT directly; scale RGBA to 0-255.
    rgba = np.uint8(my_cm(label_image) * 255)
    mask_image_name = ".".join(image_name.split('.')[:-1]) + "_mask.png"
    Image.fromarray(rgba).save(os.path.join(output_dir, mask_image_name))
def get_contours(r):
    """
    Extract polygon contours for every detected instance mask.

    :param r: a single Mask R-CNN detection dict with a 'masks' entry of
              shape (H, W, N)
    :return: (json_contours, raw_contours) -- json_contours maps "leaf_<i>"
             to flattened coordinate lists (JSON-serializable); raw_contours
             keeps the original per-mask lists of numpy contour arrays
    """
    json_contours = {}
    raw_contours = []
    num_masks = r['masks'].shape[-1]
    for idx in range(num_masks):
        # A single mask may split into several disjoint polygons.
        polygons = measure.find_contours(r['masks'][..., idx], 0.5)
        raw_contours.append(polygons)
        json_contours["leaf_{}".format(idx)] = [
            polygon.reshape(-1).tolist() for polygon in polygons
        ]
    return json_contours, raw_contours
def generate_images(infer_path):
    """
    Yield the paths of image files found at infer_path.

    :param infer_path: a directory (walked recursively) or a single file path
    :return: generator of paths with a jpg/jpeg/png/bmp extension
    """
    image_extensions = ['jpg', 'jpeg', 'png', 'bmp']
    if os.path.isdir(infer_path):
        for dir_path, _, files in os.walk(infer_path):
            for file in files:
                if file.split(".")[-1].lower() in image_extensions:
                    yield os.path.join(dir_path, file)
    else:
        # BUG FIX: this function is a generator (it contains `yield`), so the
        # original `return [os.path.join(infer_path)]` only set the generator's
        # StopIteration value -- iterating over a single-file path produced
        # nothing at all. Yield the path instead.
        if infer_path.split(".")[-1].lower() in image_extensions:
            yield infer_path
def generate_annotation_map(adapter_class, gt_dir, images, task_id=None):
    """
    Build a map from image path to its ground-truth leaf annotations.

    :param adapter_class: callable taking (gt_dir) or (gt_dir, task_id) and
                          yielding (annotation, image_path, index) tuples
    :param gt_dir: directory that contains the annotation files
    :param images: list or generator of image paths to keep
    :param task_id: optional extra argument forwarded to the adapter
    :return: dictionary {"image_path": list(leave_polygons)}
    """
    # Materialize into a set: membership is tested once per annotation, and a
    # set makes that O(1) instead of the original list's O(n) scan.
    images = set(images)
    if task_id is not None:
        annotations = adapter_class(gt_dir, task_id)
    else:
        annotations = adapter_class(gt_dir)
    leaves_map = {}
    for annotation, image_path, _ in annotations:
        if image_path in images:
            leaves_map.setdefault(image_path, []).append(annotation)
    return leaves_map
def get_inference_config(config):
    """
    Alters a config class to make it suited for inference.

    Alterations are not mandatory for the inference to work. Note that the
    class object itself is mutated before a fresh instance is returned.

    :param config: the config class
    :return: an instance of the modified config class
    """
    # Inference processes one image at a time on a single GPU.
    for attr, value in (("IMAGES_PER_GPU", 1), ("GPU_COUNT", 1)):
        setattr(config, attr, value)
    return config()
GROUND_TRUTH_MIN_SIZE_COEFF = 0.05 # 0.03 0.05
def calculate_iou(image_name, detected_masks, gt_masks, single_mask=True):
    """
    Score detections against ground truth by greedy one-to-one IoU matching.

    Builds the pairwise IoU matrix (rows = ground-truth masks, columns =
    detected masks), then repeatedly picks the best remaining pair and
    removes its row and column, averaging the matched IoUs.

    :param image_name: kept for interface compatibility (currently unused)
    :param detected_masks: array of shape (H, W, num_detected)
    :param gt_masks: array of shape (H, W, num_gt)
    :param single_mask: kept for interface compatibility (currently unused)
    :return: average IoU over ground-truth masks; 1 when there is no ground
             truth at all
    """
    num_gt_masks = gt_masks.shape[-1]
    num_detected = detected_masks.shape[2]

    # Pairwise IoU matrix.
    all_iou = np.zeros(shape=[num_gt_masks, num_detected])
    for i in range(num_gt_masks):
        gt = gt_masks[:, :, i]
        for j in range(num_detected):
            det = detected_masks[:, :, j]
            union = np.count_nonzero(np.logical_or(gt, det))
            if union == 0:
                # Both masks empty: IoU is left at 0. The original code
                # divided by zero here (numI / numU with numU == 0).
                continue
            intersection = np.count_nonzero(np.logical_and(gt, det))
            all_iou[i, j] = intersection / union

    # Greedy matching: best remaining pair first; zero out its row and column
    # so neither mask can be matched again.
    score = 0.0
    for _ in range(num_gt_masks):
        if all_iou.size == 0:
            # No detections at all; np.amax on an empty array would raise.
            break
        best = np.amax(all_iou)
        score += best
        row, col = divmod(np.argmax(all_iou), all_iou.shape[1])
        all_iou[row, :] = 0
        all_iou[:, col] = 0

    # Average over ground-truth masks; no ground truth counts as perfect.
    return score / num_gt_masks if num_gt_masks > 0 else 1
def get_all_masks(leaves_list, image_path):
    """
    Rasterize all ground-truth leaf polygons of one image into a mask stack.

    :param leaves_list: per-leaf polygon data; each entry is either a flat
                        coordinate list or a list of such lists
    :param image_path: path of the image (opened only to read its size)
    :return: float array of shape (image_height, image_width, len(leaves_list))
    """
    with Image.open(image_path) as im:
        image_width, image_height = im.size
    # zeros, not empty: leaves whose mask cannot be decoded must stay blank
    # instead of containing uninitialized memory.
    masks = np.zeros(shape=(image_height, image_width, len(leaves_list)))
    for i, polygon_list in enumerate(leaves_list):
        if not polygon_list:
            # Empty annotation: the original indexed polygon_list[0] and
            # raised IndexError here.
            continue
        # A single leaf might have multiple polygons, but in case the adapter
        # doesn't support that it hands us one flat list -- wrap it.
        if not isinstance(polygon_list[0], list):
            polygon_list = [polygon_list]
        mask = get_mask_from_list(polygon_list, image_width, image_height)
        # get_mask_from_list returns None for sub-pixel objects; the original
        # assignment of None into a float array raised TypeError.
        if mask is not None:
            masks[..., i] = mask
    return masks
def get_mask_from_list(xy_polygons, width, height):
    """
    Rasterize a list of polygons into a single binary mask via COCO RLE.

    :param xy_polygons: list of polygons
    :type xy_polygons: list(list(float))
    :param width: image width
    :param height: image height
    :return: a binary mask of shape (height, width) -- pycocotools decodes
             RLE row-major -- or None if no mask exists
    :rtype: numpy array of uint8, or None
    """
    rle = ann_to_rle(xy_polygons, height, width)
    m = mask_tools.decode(rle)
    # Some objects are so small that they're less than 1 pixel area
    # and end up rounded out. Skip those objects.
    if m.max() < 1:
        return None
    return m
def ann_to_rle(segm, height, width):
    """
    Convert annotation which can be polygons, uncompressed RLE to RLE.

    :param segm: a list of polygons, or an RLE dict carrying a 'counts' key
    :param height: image height in pixels
    :param width: image width in pixels
    :return: compressed COCO RLE dict
    """
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts
        # we merge all parts into one mask rle code
        rles = mask_tools.frPyObjects(segm, height, width)
        return mask_tools.merge(rles)
    # The docstring always promised RLE support, but the original fell off
    # the end and silently returned None for non-list input. Handle the two
    # RLE forms the same way pycocotools' COCO.annToRLE does.
    if isinstance(segm.get('counts'), list):
        # uncompressed RLE -> compress it
        return mask_tools.frPyObjects(segm, height, width)
    # already compressed RLE
    return segm