|
4 | 4 | import os |
5 | 5 | import sys |
6 | 6 | import math |
| 7 | +import pandas as pd |
7 | 8 | import random |
8 | 9 | import GeodisTK |
9 | 10 | import configparser |
@@ -180,7 +181,7 @@ def get_evaluation_score(s_volume, g_volume, spacing, metric): |
180 | 181 | |
181 | 182 | return score |
182 | 183 | |
183 | | -def evaluation(config_file): |
| 184 | +def evaluation_backup(config_file): |
184 | 185 | config = parse_config(config_file)['evaluation'] |
185 | 186 | metric = config['metric'] |
186 | 187 | labels = config['label_list'] |
@@ -218,7 +219,9 @@ def evaluation(config_file): |
218 | 219 | |
219 | 220 | for g_folder in g_folder_list: |
220 | 221 | g_name = os.path.join(g_folder, patient_names[i] + g_postfix_long) |
221 | | - if(os.path.isfile(g_name)): |
| 222 | + if(not os.path.isfile(g_name)): |
| 223 | + g_name = g_name.replace(patient_names[i], patient_names[i] + '/' + patient_names[i]) |
| 224 | + if(not os.path.isfile(g_name)): |
222 | 225 | break |
223 | 226 | s_dict = load_image_as_nd_array(s_name) |
224 | 227 | g_dict = load_image_as_nd_array(g_name) |
@@ -257,6 +260,64 @@ def evaluation(config_file): |
257 | 260 | print("{0:} mean ".format(metric), score_mean) |
258 | 261 | print("{0:} std ".format(metric), score_std) |
259 | 262 | |
| 263 | +def evaluation(config_file): |
| 264 | + config = parse_config(config_file)['evaluation'] |
| 265 | + metric = config['metric'] |
| 266 | + labels = config['label_list'] |
| 267 | + organ_name = config['organ_name'] |
| 268 | + gt_root = config['ground_truth_folder_root'] |
| 269 | + seg_root = config['segmentation_folder_root'] |
| 270 | + image_pair_csv = config['evaluation_image_pair'] |
| 271 | + ground_truth_label_convert_source = config.get('ground_truth_label_convert_source', None) |
| 272 | + ground_truth_label_convert_target = config.get('ground_truth_label_convert_target', None) |
| 273 | + segmentation_label_convert_source = config.get('segmentation_label_convert_source', None) |
| 274 | + segmentation_label_convert_target = config.get('segmentation_label_convert_target', None) |
| 275 | + |
| 276 | + image_items = pd.read_csv(image_pair_csv) |
| 277 | + item_num = len(image_items) |
| 278 | + score_all_data = [] |
| 279 | + for i in range(item_num): |
| 280 | + gt_name = image_items.iloc[i, 0] |
| 281 | + seg_name = image_items.iloc[i, 1] |
| 282 | + gt_full_name = gt_root + '/' + gt_name |
| 283 | + seg_full_name = seg_root + '/' + seg_name |
| 284 | + |
| 285 | + s_dict = load_image_as_nd_array(seg_full_name) |
| 286 | + g_dict = load_image_as_nd_array(gt_full_name) |
| 287 | + s_volume = s_dict["data_array"]; s_spacing = s_dict["spacing"] |
| 288 | + g_volume = g_dict["data_array"]; g_spacing = g_dict["spacing"] |
| 289 | + # for dim in range(len(s_spacing)): |
| 290 | + # assert(s_spacing[dim] == g_spacing[dim]) |
| 291 | + if((ground_truth_label_convert_source is not None) and \ |
| 292 | + ground_truth_label_convert_target is not None): |
| 293 | + g_volume = convert_label(g_volume, ground_truth_label_convert_source, \ |
| 294 | + ground_truth_label_convert_target) |
| 295 | + |
| 296 | + if((segmentation_label_convert_source is not None) and \ |
| 297 | + segmentation_label_convert_target is not None): |
| 298 | + s_volume = convert_label(s_volume, segmentation_label_convert_source, \ |
| 299 | + segmentation_label_convert_target) |
| 300 | + |
| 301 | + # fuse multiple labels |
| 302 | + s_volume_sub = np.zeros_like(s_volume) |
| 303 | + g_volume_sub = np.zeros_like(g_volume) |
| 304 | + for lab in labels: |
| 305 | + s_volume_sub = s_volume_sub + np.asarray(s_volume == lab, np.uint8) |
| 306 | + g_volume_sub = g_volume_sub + np.asarray(g_volume == lab, np.uint8) |
| 307 | + |
| 308 | + # get evaluation score |
| 309 | + temp_score = get_evaluation_score(s_volume_sub > 0, g_volume_sub > 0, |
| 310 | + s_spacing, metric) |
| 311 | + score_all_data.append(temp_score) |
| 312 | + print(seg_name, temp_score) |
| 313 | + score_all_data = np.asarray(score_all_data) |
| 314 | + score_mean = [score_all_data.mean(axis = 0)] |
| 315 | + score_std = [score_all_data.std(axis = 0)] |
| 316 | + np.savetxt("{0:}/{1:}_{2:}_all.txt".format(seg_root, organ_name, metric), score_all_data) |
| 317 | + np.savetxt("{0:}/{1:}_{2:}_mean.txt".format(seg_root, organ_name, metric), score_mean) |
| 318 | + np.savetxt("{0:}/{1:}_{2:}_std.txt".format(seg_root, organ_name, metric), score_std) |
| 319 | + print("{0:} mean ".format(metric), score_mean) |
| 320 | + print("{0:} std ".format(metric), score_std) |
260 | 321 | |
261 | 322 | def main(): |
262 | 323 | if(len(sys.argv) < 2): |
|
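For context, a minimal sketch of the image-pair CSV that the new `evaluation` expects: it is loaded with `pd.read_csv` and indexed by position, so the first column supplies ground-truth file names (joined to `ground_truth_folder_root`) and the second supplies segmentation file names (joined to `segmentation_folder_root`). The column headers and file names below are illustrative assumptions, not part of the commit.

```python
# Illustrative only: build a two-column image-pair CSV for the new evaluation().
# Column order matters (iloc[i, 0] = ground truth, iloc[i, 1] = segmentation);
# the header names and file names here are made up for the example.
import pandas as pd

pairs = pd.DataFrame({
    "ground_truth": ["patient_001.nii.gz", "patient_002.nii.gz"],
    "segmentation": ["patient_001_seg.nii.gz", "patient_002_seg.nii.gz"],
})
# Write to the path given by 'evaluation_image_pair' in the [evaluation] config.
pairs.to_csv("image_pairs.csv", index=False)
```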