Skip to content

Commit 913705f

Browse files
committed
fix typo
1 parent b4e97fe commit 913705f

File tree

5 files changed

+17
-17
lines changed

5 files changed

+17
-17
lines changed

nnunetv2/inference/data_iterators.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]],
2828
label_manager = plans_manager.get_label_manager(dataset_json)
2929
preprocessor = configuration_manager.preprocessor_class(verbose=verbose)
3030
for idx in range(len(list_of_lists)):
31-
data, seg, data_properites = preprocessor.run_case(list_of_lists[idx],
31+
data, seg, data_properties = preprocessor.run_case(list_of_lists[idx],
3232
list_of_segs_from_prev_stage_files[
3333
idx] if list_of_segs_from_prev_stage_files is not None else None,
3434
plans_manager,
@@ -40,7 +40,7 @@ def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]],
4040

4141
data = torch.from_numpy(data).contiguous().float()
4242

43-
item = {'data': data, 'data_properites': data_properites,
43+
item = {'data': data, 'data_properties': data_properties,
4444
'ofile': output_filenames_truncated[idx] if output_filenames_truncated is not None else None}
4545
success = False
4646
while not success:
@@ -150,7 +150,7 @@ def generate_train_batch(self):
150150
# if we have a segmentation from the previous stage we have to process it together with the images so that we
151151
# can crop it appropriately (if needed). Otherwise it would just be resized to the shape of the data after
152152
# preprocessing and then there might be misalignments
153-
data, seg, data_properites = self.preprocessor.run_case(files, seg_prev_stage, self.plans_manager,
153+
data, seg, data_properties = self.preprocessor.run_case(files, seg_prev_stage, self.plans_manager,
154154
self.configuration_manager,
155155
self.dataset_json)
156156
if seg_prev_stage is not None:
@@ -159,7 +159,7 @@ def generate_train_batch(self):
159159

160160
data = torch.from_numpy(data)
161161

162-
return {'data': data, 'data_properites': data_properites, 'ofile': ofile}
162+
return {'data': data, 'data_properties': data_properties, 'ofile': ofile}
163163

164164

165165
class PreprocessAdapterFromNpy(DataLoader):
@@ -207,7 +207,7 @@ def generate_train_batch(self):
207207

208208
data = torch.from_numpy(data)
209209

210-
return {'data': data, 'data_properites': props, 'ofile': ofname}
210+
return {'data': data, 'data_properties': props, 'ofile': ofname}
211211

212212

213213
def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray],
@@ -238,7 +238,7 @@ def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray],
238238

239239
data = torch.from_numpy(data).contiguous().float()
240240

241-
item = {'data': data, 'data_properites': list_of_image_properties[idx],
241+
item = {'data': data, 'data_properties': list_of_image_properties[idx],
242242
'ofile': truncated_ofnames[idx] if truncated_ofnames is not None else None}
243243
success = False
244244
while not success:

nnunetv2/inference/examples.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@
8080
img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
8181

8282

83-
# each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
83+
# each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
8484
# If 'ofile' is None, the result will be returned instead of written to a file
8585
# the iterator is responsible for performing the correct preprocessing!
8686
# note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
@@ -95,7 +95,7 @@ def my_iterator(list_of_input_arrs, list_of_input_props):
9595
predictor.plans_manager,
9696
predictor.configuration_manager,
9797
predictor.dataset_json)
98-
yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properites': p, 'ofile': None}
98+
yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None}
9999

100100

101101
ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]),

nnunetv2/inference/predict_from_raw_data.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -333,7 +333,7 @@ def predict_from_data_iterator(self,
333333
save_probabilities: bool = False,
334334
num_processes_segmentation_export: int = default_num_processes):
335335
"""
336-
each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
336+
each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
337337
If 'ofile' is None, the result will be returned instead of written to a file
338338
"""
339339
with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool:
@@ -354,7 +354,7 @@ def predict_from_data_iterator(self,
354354

355355
print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}')
356356

357-
properties = preprocessed['data_properites']
357+
properties = preprocessed['data_properties']
358358

359359
# let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with
360360
# npy files
@@ -430,14 +430,14 @@ def predict_single_npy_array(self, input_image: np.ndarray, image_properties: di
430430
if self.verbose:
431431
print('resampling to original shape')
432432
if output_file_truncated is not None:
433-
export_prediction_from_logits(predicted_logits, dct['data_properites'], self.configuration_manager,
433+
export_prediction_from_logits(predicted_logits, dct['data_properties'], self.configuration_manager,
434434
self.plans_manager, self.dataset_json, output_file_truncated,
435435
save_or_return_probabilities)
436436
else:
437437
ret = convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits, self.plans_manager,
438438
self.configuration_manager,
439439
self.label_manager,
440-
dct['data_properites'],
440+
dct['data_properties'],
441441
return_probabilities=
442442
save_or_return_probabilities)
443443
if save_or_return_probabilities:

nnunetv2/inference/readme.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ cons:
184184
img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
185185
img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
186186
img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
187-
# each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
187+
# each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
188188
# If 'ofile' is None, the result will be returned instead of written to a file
189189
# the iterator is responsible for performing the correct preprocessing!
190190
# note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
@@ -199,7 +199,7 @@ cons:
199199
predictor.plans_manager,
200200
predictor.configuration_manager,
201201
predictor.dataset_json)
202-
yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properites': p, 'ofile': None}
202+
yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None}
203203
ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]),
204204
save_probabilities=False, num_processes_segmentation_export=3)
205205
```

nnunetv2/preprocessing/preprocessors/default_preprocessor.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -128,17 +128,17 @@ def run_case(self, image_files: List[str], seg_file: Union[str, None], plans_man
128128
rw = plans_manager.image_reader_writer_class()
129129

130130
# load image(s)
131-
data, data_properites = rw.read_images(image_files)
131+
data, data_properties = rw.read_images(image_files)
132132

133133
# if possible, load seg
134134
if seg_file is not None:
135135
seg, _ = rw.read_seg(seg_file)
136136
else:
137137
seg = None
138138

139-
data, seg = self.run_case_npy(data, seg, data_properites, plans_manager, configuration_manager,
139+
data, seg = self.run_case_npy(data, seg, data_properties, plans_manager, configuration_manager,
140140
dataset_json)
141-
return data, seg, data_properites
141+
return data, seg, data_properties
142142

143143
def run_case_save(self, output_filename_truncated: str, image_files: List[str], seg_file: str,
144144
plans_manager: PlansManager, configuration_manager: ConfigurationManager,

0 commit comments

Comments (0)