Hello, I am currently exploring the possibilities of the anomalib library and have already trained a PaDiM model on a custom dataset, which worked well.
anomalib version = 1.1.0
Python version = 3.10.14
I then tried to train a Fastflow model on the same dataset, which produced the following error.
Error
F1Score class exists for backwards compatibility. It will be removed in v1.1. Please use BinaryF1Score from torchmetrics instead
┏━━━┳━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┓
┃ ┃ Name ┃ Type ┃ Params ┃
┡━━━╇━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━┩
│ 0 │ loss │ FastflowLoss │ 0 │
│ 1 │ _transform │ Compose │ 0 │
│ 2 │ normalization_metrics │ MinMax │ 0 │
│ 3 │ image_threshold │ F1AdaptiveThreshold │ 0 │
│ 4 │ pixel_threshold │ F1AdaptiveThreshold │ 0 │
│ 5 │ image_metrics │ AnomalibMetricCollection │ 0 │
│ 6 │ pixel_metrics │ AnomalibMetricCollection │ 0 │
│ 7 │ model │ FastflowModel │ 10.6 M │
└───┴───────────────────────┴──────────────────────────┴────────┘
Trainable params: 6.5 M
Non-trainable params: 4.2 M
Total params: 10.6 M
Total estimated model params size (MB): 42
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[8], line 1
----> 1 engine.fit(datamodule=datamodule, model=model)
File ~\anaconda3\envs\anonew\lib\site-packages\anomalib\engine\engine.py:540, in Engine.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
538 self.trainer.validate(model, val_dataloaders, datamodule=datamodule, ckpt_path=ckpt_path)
539 else:
--> 540 self.trainer.fit(model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\trainer.py:544, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
542 self.state.status = TrainerStatus.RUNNING
543 self.training = True
--> 544 call._call_and_handle_interrupt(
545 self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
546 )
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\call.py:44, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
42 if trainer.strategy.launcher is not None:
43 return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
---> 44 return trainer_fn(*args, **kwargs)
46 except _TunerExitException:
47 _call_teardown_hook(trainer)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\trainer.py:580, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
573 assert self.state.fn is not None
574 ckpt_path = self._checkpoint_connector._select_ckpt_path(
575 self.state.fn,
576 ckpt_path,
577 model_provided=True,
578 model_connected=self.lightning_module is not None,
579 )
--> 580 self._run(model, ckpt_path=ckpt_path)
582 assert self.state.stopped
583 self.training = False
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\trainer.py:987, in Trainer._run(self, model, ckpt_path)
982 self._signal_connector.register_signal_handlers()
984 # ----------------------------
985 # RUN THE TRAINER
986 # ----------------------------
--> 987 results = self._run_stage()
989 # ----------------------------
990 # POST-Training CLEAN UP
991 # ----------------------------
992 log.debug(f"{self.__class__.__name__}: trainer tearing down")
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\trainer.py:1033, in Trainer._run_stage(self)
1031 self._run_sanity_check()
1032 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
-> 1033 self.fit_loop.run()
1034 return None
1035 raise RuntimeError(f"Unexpected state {self.state}")
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\fit_loop.py:205, in _FitLoop.run(self)
203 try:
204 self.on_advance_start()
--> 205 self.advance()
206 self.on_advance_end()
207 self._restarting = False
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\fit_loop.py:363, in _FitLoop.advance(self)
361 with self.trainer.profiler.profile("run_training_epoch"):
362 assert self._data_fetcher is not None
--> 363 self.epoch_loop.run(self._data_fetcher)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\training_epoch_loop.py:140, in _TrainingEpochLoop.run(self, data_fetcher)
138 while not self.done:
139 try:
--> 140 self.advance(data_fetcher)
141 self.on_advance_end(data_fetcher)
142 self._restarting = False
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\training_epoch_loop.py:250, in _TrainingEpochLoop.advance(self, data_fetcher)
247 with trainer.profiler.profile("run_training_batch"):
248 if trainer.lightning_module.automatic_optimization:
249 # in automatic optimization, there can only be one optimizer
--> 250 batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
251 else:
252 batch_output = self.manual_optimization.run(kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:190, in _AutomaticOptimization.run(self, optimizer, batch_idx, kwargs)
183 closure()
185 # ------------------------------
186 # BACKWARD PASS
187 # ------------------------------
188 # gradient update with accumulated gradients
189 else:
--> 190 self._optimizer_step(batch_idx, closure)
192 result = closure.consume_result()
193 if result.loss is None:
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:268, in _AutomaticOptimization._optimizer_step(self, batch_idx, train_step_and_backward_closure)
265 self.optim_progress.optimizer.step.increment_ready()
267 # model hook
--> 268 call._call_lightning_module_hook(
269 trainer,
270 "optimizer_step",
271 trainer.current_epoch,
272 batch_idx,
273 optimizer,
274 train_step_and_backward_closure,
275 )
277 if not should_accumulate:
278 self.optim_progress.optimizer.step.increment_completed()
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\call.py:157, in _call_lightning_module_hook(trainer, hook_name, pl_module, *args, **kwargs)
154 pl_module._current_fx_name = hook_name
156 with trainer.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
--> 157 output = fn(*args, **kwargs)
159 # restore current_fx when nested context
160 pl_module._current_fx_name = prev_fx_name
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\core\module.py:1303, in LightningModule.optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure)
1264 def optimizer_step(
1265 self,
1266 epoch: int,
(...)
1269 optimizer_closure: Optional[Callable[[], Any]] = None,
1270 ) -> None:
1271 r"""Override this method to adjust the default way the :class:`~lightning.pytorch.trainer.trainer.Trainer` calls
1272 the optimizer.
1273
(...)
1301
1302 """
-> 1303 optimizer.step(closure=optimizer_closure)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\core\optimizer.py:152, in LightningOptimizer.step(self, closure, **kwargs)
149 raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")
151 assert self._strategy is not None
--> 152 step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
154 self._on_after_step()
156 return step_output
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\strategies\strategy.py:239, in Strategy.optimizer_step(self, optimizer, closure, model, **kwargs)
237 # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
238 assert isinstance(model, pl.LightningModule)
--> 239 return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\plugins\precision\precision.py:122, in Precision.optimizer_step(self, optimizer, model, closure, **kwargs)
120 """Hook to run the optimizer step."""
121 closure = partial(self._wrap_closure, model, optimizer, closure)
--> 122 return optimizer.step(closure=closure, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\torch\optim\optimizer.py:391, in Optimizer.profile_hook_step.<locals>.wrapper(*args, **kwargs)
386 else:
387 raise RuntimeError(
388 f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
389 )
--> 391 out = func(*args, **kwargs)
392 self._optimizer_step_code()
394 # call optimizer step post hooks
File ~\anaconda3\envs\anonew\lib\site-packages\torch\optim\optimizer.py:76, in _use_grad_for_differentiable.<locals>._use_grad(self, *args, **kwargs)
74 torch.set_grad_enabled(self.defaults['differentiable'])
75 torch._dynamo.graph_break()
---> 76 ret = func(self, *args, **kwargs)
77 finally:
78 torch._dynamo.graph_break()
File ~\anaconda3\envs\anonew\lib\site-packages\torch\optim\adam.py:148, in Adam.step(self, closure)
146 if closure is not None:
147 with torch.enable_grad():
--> 148 loss = closure()
150 for group in self.param_groups:
151 params_with_grad = []
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\plugins\precision\precision.py:108, in Precision._wrap_closure(self, model, optimizer, closure)
95 def _wrap_closure(
96 self,
97 model: "pl.LightningModule",
98 optimizer: Optimizer,
99 closure: Callable[[], Any],
100 ) -> Any:
101 """This double-closure allows makes sure the ``closure`` is executed before the ``on_before_optimizer_step``
102 hook is called.
103
(...)
106
107 """
--> 108 closure_result = closure()
109 self._after_closure(model, optimizer)
110 return closure_result
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:144, in Closure.__call__(self, *args, **kwargs)
142 @override
143 def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 144 self._result = self.closure(*args, **kwargs)
145 return self._result.loss
File ~\anaconda3\envs\anonew\lib\site-packages\torch\utils\_contextlib.py:115, in context_decorator.<locals>.decorate_context(*args, **kwargs)
112 @functools.wraps(func)
113 def decorate_context(*args, **kwargs):
114 with ctx_factory():
--> 115 return func(*args, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:129, in Closure.closure(self, *args, **kwargs)
126 @override
127 @torch.enable_grad()
128 def closure(self, *args: Any, **kwargs: Any) -> ClosureResult:
--> 129 step_output = self._step_fn()
131 if step_output.closure_loss is None:
132 self.warning_cache.warn("`training_step` returned `None`. If this was on purpose, ignore this warning...")
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:318, in _AutomaticOptimization._training_step(self, kwargs)
315 trainer = self.trainer
317 # manually capture logged metrics
--> 318 training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
319 self.trainer.strategy.post_training_step() # unused hook - call anyway for backward compatibility
321 return self.output_result_cls.from_training_step_output(training_step_output, trainer.accumulate_grad_batches)
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\trainer\call.py:309, in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
306 return None
308 with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 309 output = fn(*args, **kwargs)
311 # restore current_fx when nested context
312 pl_module._current_fx_name = prev_fx_name
File ~\anaconda3\envs\anonew\lib\site-packages\lightning\pytorch\strategies\strategy.py:391, in Strategy.training_step(self, *args, **kwargs)
389 if self.model != self.lightning_module:
390 return self._forward_redirection(self.model, self.lightning_module, "training_step", *args, **kwargs)
--> 391 return self.lightning_module.training_step(*args, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\anomalib\models\image\fastflow\lightning_model.py:84, in Fastflow.training_step(***failed resolving arguments***)
72 """Perform the training step input and return the loss.
73
74 Args:
(...)
80 STEP_OUTPUT: Dictionary containing the loss value.
81 """
82 del args, kwargs # These variables are not used.
---> 84 hidden_variables, jacobians = self.model(batch["image"])
85 loss = self.loss(hidden_variables, jacobians)
86 self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\modules\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
-> 1532 return self._call_impl(*args, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\modules\module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don't have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None
File ~\anaconda3\envs\anonew\lib\site-packages\anomalib\models\image\fastflow\torch_model.py:193, in FastflowModel.forward(self, input_tensor)
191 features = self._get_cait_features(input_tensor)
192 else:
--> 193 features = self._get_cnn_features(input_tensor)
195 # Compute the hidden variable f: X -> Z and log-likelihood of the jacobian
196 # (See Section 3.3 in the paper.)
197 # NOTE: output variable has z, and jacobian tuple for each fast-flow blocks.
198 hidden_variables: list[torch.Tensor] = []
File ~\anaconda3\envs\anonew\lib\site-packages\anomalib\models\image\fastflow\torch_model.py:222, in FastflowModel._get_cnn_features(self, input_tensor)
213 """Get CNN-based features.
214
215 Args:
(...)
219 list[torch.Tensor]: List of features.
220 """
221 features = self.feature_extractor(input_tensor)
--> 222 return [self.norms[i](feature) for i, feature in enumerate(features)]
File ~\anaconda3\envs\anonew\lib\site-packages\anomalib\models\image\fastflow\torch_model.py:222, in <listcomp>(.0)
213 """Get CNN-based features.
214
215 Args:
(...)
219 list[torch.Tensor]: List of features.
220 """
221 features = self.feature_extractor(input_tensor)
--> 222 return [self.norms[i](feature) for i, feature in enumerate(features)]
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\modules\module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
-> 1532 return self._call_impl(*args, **kwargs)
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\modules\module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don't have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\modules\normalization.py:201, in LayerNorm.forward(self, input)
200 def forward(self, input: Tensor) -> Tensor:
--> 201 return F.layer_norm(
202 input, self.normalized_shape, self.weight, self.bias, self.eps)
File ~\anaconda3\envs\anonew\lib\site-packages\torch\nn\functional.py:2573, in layer_norm(input, normalized_shape, weight, bias, eps)
2569 if has_torch_function_variadic(input, weight, bias):
2570 return handle_torch_function(
2571 layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps
2572 )
-> 2573 return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: Given normalized_shape=[64, 155, 112], expected input with shape [*, 64, 155, 112], but got input of size[1, 64, 155, 113]
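If I read the error correctly, the mismatch is only in the last dimension (112 vs. 113): the LayerNorm shapes inside FastflowModel seem to be derived from the configured image_size by integer division, while the ResNet backbone actually produces a feature map that is one pixel wider, because 450 is not cleanly divisible by the downsampling factor. A quick sanity check of that arithmetic (this is my own assumption about the cause, using the standard ResNet conv1 + maxpool parameters):

# Assumed: the first FastFlow scale uses stride-4 ResNet features,
# i.e. conv1 (k=7, s=2, p=3) followed by maxpool (k=3, s=2, p=1).
def conv_out(size: int, kernel: int, stride: int, padding: int) -> int:
    return (size + 2 * padding - kernel) // stride + 1

h, w = 620, 450                                   # configured image_size
print(h // 4, w // 4)                             # 155 112 -> shape the LayerNorms were built for
print(conv_out(conv_out(h, 7, 2, 3), 3, 2, 1),
      conv_out(conv_out(w, 7, 2, 3), 3, 2, 1))    # 155 113 -> shape the backbone actually produces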
Code
from anomalib import TaskType
from anomalib.data.image.folder import Folder
from anomalib.data.utils.split import TestSplitMode

# path points to the dataset root directory (defined earlier in the notebook)
datamodule = Folder(
    name="pcb",
    root=path,
    normal_dir="Normal",
    abnormal_dir="Anomaly",
    normal_split_ratio=0.2,
    image_size=(620, 450),
    train_batch_size=1,
    eval_batch_size=3,
    task=TaskType.CLASSIFICATION,
)
datamodule.setup()
datamodule.prepare_data()

val_dataloader = datamodule.val_dataloader()
# Modify the DataLoader for debugging
val_dataloader.num_workers = 0  # Disable multiprocessing for debugging
# val_dataloader.pin_memory = True  # Enable pin_memory if using GPU

# Debugging: iterate through the DataLoader
try:
    i, data = next(enumerate(val_dataloader))
    print(data.keys())
except Exception as e:
    print(f"Error: {e}")
Custom dataset
The dataset consists of 26 images (620 x 450) saved as .png.
Of these 26 images, 20 are good examples and 6 are abnormal examples.
This worked great for PaDiM
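For reference, the folder layout passed to the Folder datamodule looks like this (directory names as in the code above; the root is the path variable):

<dataset root>/
    Normal/     # 20 good .png images, 620 x 450
    Anomaly/    # 6 abnormal .png images, 620 x 450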
Other datasets
While trying to pinpoint the problem, I also tried the MVTec dataset and the MVTec deep learning "juice_bottle" example.
Both worked without a problem.
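In case it is relevant: my current working theory is that an image_size whose sides are divisible by 32 would keep the computed LayerNorm shapes and the actual feature-map shapes in sync at every FastFlow scale. I have not verified this yet; the value below is just my original resolution rounded to multiples of 32:

# Untested guess: image_size divisible by 32 so that integer division and the
# backbone's actual downsampling agree at every scale used by FastFlow.
datamodule = Folder(
    name="pcb",
    root=path,
    normal_dir="Normal",
    abnormal_dir="Anomaly",
    normal_split_ratio=0.2,
    image_size=(640, 448),   # rounded from (620, 450)
    train_batch_size=1,
    eval_batch_size=3,
    task=TaskType.CLASSIFICATION,
)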