11import shutil
22from functools import partial
33from pathlib import Path
4- from typing import TYPE_CHECKING
4+ from typing import TYPE_CHECKING , List
55
66import matplotlib .pyplot as plt
77import numpy as np
@@ -149,7 +149,7 @@ def __init__(
149149 """Plot for dice metric"""
150150 self .plot_dock = None
151151 """Docked widget with plots"""
152- self .result_layers = []
152+ self .result_layers : List [ napari . layers . Layer ] = []
153153 """Layers to display checkpoint"""
154154
155155 self .plot_1_labels = {
@@ -323,9 +323,9 @@ def __init__(
323323 def _set_tooltips (self ):
324324 # tooltips
325325 self .zip_choice .setToolTip (
326- "Checking this will save a copy of the results as a zip folder"
326+ "Save a copy of the results as a zip folder"
327327 )
328- self .validation_percent_choice .tooltips = "Choose the proportion of images to retain for training.\n The remaining images will be used for validation"
328+ self .validation_percent_choice .tooltips = "The percentage of images to retain for training.\n The remaining images will be used for validation"
329329 self .epoch_choice .tooltips = "The number of epochs to train for.\n The more you train, the better the model will fit the training data"
330330 self .loss_choice .setToolTip (
331331 "The loss function to use for training.\n See the list in the training guide for more info"
@@ -335,10 +335,10 @@ def _set_tooltips(self):
335335 )
336336 self .batch_choice .tooltips = (
337337 "The batch size to use for training.\n A larger value will feed more images per iteration to the model,\n "
338- " which is faster and possibly improves performance, but uses more memory"
338+ " which is faster and can improve performance, but uses more memory on your selected device "
339339 )
340340 self .val_interval_choice .tooltips = (
341- "The number of epochs to perform before validating data.\n "
341+ "The number of epochs to perform before validating on test data.\n "
342342 "The lower the value, the more often the score of the model will be computed and the more often the weights will be saved."
343343 )
344344 self .learning_rate_choice .setToolTip (
@@ -352,19 +352,19 @@ def _set_tooltips(self):
352352 )
353353 self .augment_choice .setToolTip (
354354 "Check this to enable data augmentation, which will randomly deform, flip and shift the intensity in images"
355- " to provide a more general dataset. \n Use this if you're extracting more than 10 samples per image "
355+ " to provide a more diverse dataset"
356356 )
357357 [
358358 w .setToolTip ("Size of the sample to extract" )
359359 for w in self .patch_size_widgets
360360 ]
361361 self .patch_choice .setToolTip (
362- "Check this to automatically crop your images in smaller, cubic images for training."
363- "\n Should be used if you have a small dataset (and large images) "
362+ "Check this to automatically crop your images into smaller, cubic images for training."
363+ "\n Should be used if you have a few large images"
364364 )
365365 self .use_deterministic_choice .setToolTip (
366366 "Enable deterministic training for reproducibility."
367- "Using the same seed with all other parameters being similar should yield the exact same results between two runs."
367+ "Using the same seed with all other parameters being similar should yield the exact same results across runs."
368368 )
369369 self .use_transfer_choice .setToolTip (
370370 "Use this if you want to initialize the model with pre-trained weights or use your own weights."
@@ -1292,8 +1292,7 @@ def _display_results(self, images_dict, complete_missing=False):
12921292 "data"
12931293 ]
12941294 self .result_layers [i ].refresh ()
1295- clims = self .result_layers [i ].contrast_limits
1296- [c .reset_contrast_limits_range () for c in clims ]
1295+ self .result_layers [i ].reset_contrast_limits ()
12971296
12981297 def on_yield (self , report : TrainingReport ): # TODO refactor for dict
12991298 # logger.info(
0 commit comments