3 files changed: +9 −0

Each of the three files gains the same pattern: an import of MemCacheHandlerSingleton from otx.core.data.caching.mem_cache_handler, plus a MemCacheHandlerSingleton.delete() call inside train() once training (or, in the second file, validation inference) has finished, releasing the in-memory dataset cache.

File 1 of 3:

 from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item
 from otx.api.utils.labels_utils import get_empty_label
 from otx.cli.utils.multi_gpu import is_multigpu_child_process
+from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton

 logger = get_logger()
 RECIPE_TRAIN_TYPE = {
@@ -215,6 +216,8 @@ def train(

         results = self._train_model(dataset)

+        MemCacheHandlerSingleton.delete()
+
         # Check for stop signal when training has stopped. If should_stop is true, training was cancelled and no new
         if self._should_stop:
             logger.info("Training cancelled.")
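The placement mirrors the existing control flow: the cache is dropped immediately after self._train_model(dataset) returns, before the stop-signal check. Below is a minimal sketch of the same cleanup with a try/finally guard so the cache is released even if training raises. The class name and the _train_model body are hypothetical placeholders, not the actual OTX task code; only the MemCacheHandlerSingleton import path and delete() call come from the diff.

# Minimal sketch, assuming only the import path and delete() classmethod shown in the diff.
from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton


class OTXTrainTaskSketch:  # hypothetical class, for illustration only
    """Illustrates releasing the shared memory cache once training ends."""

    def _train_model(self, dataset):
        # Placeholder for the real training loop.
        return {"final_ckpt": None}

    def train(self, dataset):
        try:
            results = self._train_model(dataset)
        finally:
            # Free the dataset memory cache even if training raised,
            # so it is not left allocated between runs.
            MemCacheHandlerSingleton.delete()
        return results

The PR itself calls delete() unconditionally on the success path only; the finally block above is just one way to make the cleanup robust to exceptions, not what the diff does.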
File 2 of 3:

 from otx.api.usecases.tasks.interfaces.export_interface import ExportType
 from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item
 from otx.cli.utils.multi_gpu import is_multigpu_child_process
+from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton

 logger = get_logger()

@@ -231,6 +232,8 @@ def train(
         val_dataset.purpose = DatasetPurpose.INFERENCE
         val_preds, val_map = self._infer_model(val_dataset, InferenceParameters(is_evaluation=True))

+        MemCacheHandlerSingleton.delete()
+
         preds_val_dataset = val_dataset.with_empty_annotations()
         if self._hyperparams.postprocessing.result_based_confidence_threshold:
             confidence_threshold = 0.0  # Use all predictions to compute best threshold
File 3 of 3:

     create_hard_prediction_from_soft_prediction,
 )
 from otx.cli.utils.multi_gpu import is_multigpu_child_process
+from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton

 logger = get_logger()
 RECIPE_TRAIN_TYPE = {
@@ -171,6 +172,8 @@ def train(

         results = self._train_model(dataset)

+        MemCacheHandlerSingleton.delete()
+
         # Check for stop signal when training has stopped. If should_stop is true, training was cancelled and no new
         if self._should_stop:
             logger.info("Training cancelled.")
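A single delete() call suffices in all three tasks because the handler is a singleton: one process-wide cache object sits behind classmethods, and delete() drops it so the cached data can be reclaimed before evaluation or the next run. The sketch below is a deliberately simplified, hypothetical handler that illustrates this shape; it is not OTX's actual MemCacheHandlerSingleton implementation, and the create() method and internal dict are assumptions made only for the example.

# Simplified, hypothetical singleton cache handler; NOT the real
# otx.core.data.caching.mem_cache_handler implementation.
from typing import Any, Dict, Optional


class CacheHandlerSingletonSketch:
    """Holds one process-wide in-memory cache behind classmethods."""

    _instance: Optional["CacheHandlerSingletonSketch"] = None

    def __init__(self) -> None:
        self._cache: Dict[str, Any] = {}

    @classmethod
    def create(cls) -> "CacheHandlerSingletonSketch":
        # Lazily build the single shared instance.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def delete(cls) -> None:
        # Drop the shared instance so the cached data can be collected
        # (a shared-memory variant would also unlink its buffers here).
        if cls._instance is not None:
            cls._instance._cache.clear()
            cls._instance = None


# Usage mirroring the diff: once training is done, release the cache.
CacheHandlerSingletonSketch.create()._cache["sample-0"] = b"decoded image bytes"
CacheHandlerSingletonSketch.delete()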