constraints.txt (2 additions, 2 deletions)

@@ -2,8 +2,8 @@
 openvino==2025.2.0

 # Pytorch
-torch==2.7.1
-torchvision==0.22.1
+torch==2.8.0
+torchvision==0.23.0

 # ONNX
 onnx==1.17.0; python_version < '3.13'
docs/Installation.md (2 additions, 2 deletions)

@@ -49,7 +49,7 @@ as well as the supported versions of Python:

 | NNCF      | OpenVINO   | PyTorch  | ONNX     | TensorFlow | Python |
 |-----------|------------|----------|----------|------------|--------|
-| `develop` | `2025.2.0` | `2.7.1`  | `1.17.0` | `2.15.1`   | `3.10` |
+| `develop` | `2025.2.0` | `2.8.0`  | `1.17.0` | `2.15.1`   | `3.10` |
 | `2.17.0`  | `2025.2.0` | `2.7.1`  | `1.17.0` | `2.15.1`   | `3.10` |
 | `2.16.0`  | `2025.1.0` | `2.6.0`  | `1.17.0` | `2.15.1`   | `3.10` |
 | `2.15.0`  | `2025.0.0` | `2.5.1`  | `1.17.0` | `2.15.1`   | `3.10` |

@@ -69,4 +69,4 @@ as well as the supported versions of Python:

 > (*) Python 3.9 or higher is required for TensorFlow 2.15.1

-This repository is tested on Python* 3.10.14, PyTorch* 2.7.1 (NVidia CUDA\* Toolkit 12.6) and TensorFlow* 2.15.1 (NVidia CUDA\* Toolkit 11.8).
+This repository is tested on Python* 3.10.14, PyTorch* 2.8.0 (NVidia CUDA\* Toolkit 12.6) and TensorFlow* 2.15.1 (NVidia CUDA\* Toolkit 11.8).
(requirements file, path not captured)

@@ -1,4 +1,4 @@
-torch==2.7.1
+torch==2.8.0
 datasets==3.0.1
 numpy>=1.23.5,<2
 openvino==2025.2.0
(requirements file, path not captured)

@@ -1,5 +1,5 @@
 tensorboard==2.13.0
-torch==2.7.1
+torch==2.8.0
 numpy>=1.23.5,<2
 openvino==2025.2.0
 optimum-intel>=1.22.0
(requirements file, path not captured)

@@ -1,5 +1,5 @@
 tensorboard==2.13.0
-torch==2.7.1
+torch==2.8.0
 numpy>=1.23.5,<2
 openvino==2025.2.0
 optimum-intel>=1.22.0
(requirements file, path not captured)

@@ -2,5 +2,5 @@ transformers==4.52.1
 datasets==2.14.7
 openvino==2025.2.0
 optimum==1.24.0
-torch==2.7.1
-torchvision==0.22.1
+torch==2.8.0
+torchvision==0.23.0
(requirements file, path not captured)

@@ -1,5 +1,5 @@
 fastdownload==0.0.7
 openvino==2025.2.0
 scikit-learn
-torch==2.7.1
-torchvision==0.22.1
+torch==2.8.0
+torchvision==0.23.0
(requirements file, path not captured)

@@ -2,7 +2,7 @@ fastdownload==0.0.7
 onnx==1.17.0
 openvino==2025.2.0
 pycocotools==2.0.7
-torch==2.7.1
+torch==2.8.0
 torchmetrics==1.0.1
-torchvision==0.22.1
+torchvision==0.23.0
 numpy<2
(requirements file, path not captured)

@@ -1,4 +1,4 @@
 fastdownload==0.0.7
 openvino==2025.2.0
-torch==2.7.1
-torchvision==0.22.1
+torch==2.8.0
+torchvision==0.23.0
(requirements file, path not captured)

@@ -1,4 +1,4 @@
 fastdownload==0.0.7
 openvino==2025.2.0
-torch==2.7.1
-torchvision==0.22.1
+torch==2.8.0
+torchvision==0.23.0
src/nncf/common/graph/graph.py (5 additions, 1 deletion)

@@ -622,7 +622,11 @@ def get_graph_for_structure_analysis(self, extended: bool = False) -> nx.DiGraph
         """
         out_graph = nx.DiGraph()
         for node_name, node in self._nx_graph.nodes.items():
-            attrs_node = {"id": str(node[NNCFNode.ID_NODE_ATTR]), "type": node[NNCFNode.NODE_TYPE_ATTR]}
+            attrs_node = {"id": str(node[NNCFNode.ID_NODE_ATTR])}
+            # Filter out node types that embed a memory address in their name.
+            # Relevant for torchFX dynamic graphs since torch==2.8.0.
+            if "0x" not in node[NNCFNode.NODE_TYPE_ATTR]:
+                attrs_node["type"] = node[NNCFNode.NODE_TYPE_ATTR]
             for attr in ["color", "label", "style"]:
                 if attr in node:
                     attrs_node[attr] = node[attr]
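Since torch==2.8.0, node type names in torchFX dynamic graphs can embed the memory address of the traced callable. Such addresses change between runs, so keeping them in the structural dump would make it non-deterministic. A minimal sketch of the filter's effect, with illustrative type strings rather than actual NNCF output:

```python
# Illustrative node types; the "0x..." fragment is a per-process memory
# address, so such types are dropped to keep the graph dump reproducible.
node_types = ["conv2d", "<lambda at 0x7f3a2c1d9e50>", "relu"]
stable_types = [t for t in node_types if "0x" not in t]
assert stable_types == ["conv2d", "relu"]
```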
src/nncf/torch/function_hook/hook_storage.py (25 additions, 7 deletions)

@@ -43,10 +43,22 @@ def remove(self) -> None:
         - If no hooks left under the key, the key is also removed from the storage.
         """
         storage: Optional[nn.ModuleDict] = self.storage_ref()
-        if storage is not None and self.key in storage and self.id in storage[self.key]:
-            del storage[self.key][self.id]
-            if not storage[self.key]:
-                del storage[self.key]
+        if storage is None or self.key not in storage:
+            # The hook storage has been garbage collected, or the key is not present.
+            return
+
+        hooks_dict = storage[self.key]
+        if not isinstance(hooks_dict, nn.ModuleDict):
+            msg = f"Expected nn.ModuleDict for key={self.key}, got {type(hooks_dict)}"
+            raise TypeError(msg)
+
+        if self.id not in hooks_dict:
+            # The hook with the specified id was already removed.
+            return
+
+        del hooks_dict[self.id]
+        if not storage[self.key]:
+            del storage[self.key]


 class HookStorage(nn.Module):

@@ -106,8 +118,13 @@ def _insert_hook(
         if hook_key not in storage_dict:
             storage_dict[hook_key] = nn.ModuleDict()

-        hook_id = cls._get_next_hook_id(storage_dict[hook_key])
-        storage_dict[hook_key][hook_id] = hook
+        hooks_dict = storage_dict[hook_key]
+        if not isinstance(hooks_dict, nn.ModuleDict):
+            msg = f"Expected nn.ModuleDict for key={hook_key}, got {type(hooks_dict)}"
+            raise TypeError(msg)
+
+        hook_id = cls._get_next_hook_id(hooks_dict)
+        hooks_dict[hook_id] = hook

         return RemovableHookHandle(storage_dict, hook_key, hook_id)

@@ -148,7 +165,8 @@ def _execute_hooks(cls, storage_dict: nn.ModuleDict, op_name: str, port_id: int,
         hook_key = cls._generate_key(op_name, port_id)
         if hook_key not in storage_dict:
             return value
-        for hook in storage_dict[hook_key].values():
+        hooks_dict = cast(nn.ModuleDict, storage_dict[hook_key])
+        for hook in hooks_dict.values():
             value = hook(value)
         return value
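For orientation, the storage these methods guard is a two-level nn.ModuleDict: an outer key per op/port, mapping to an inner ModuleDict of hook id to hook module. A minimal sketch of the removal semantics, with illustrative key and id names rather than NNCF's actual key format:

```python
from torch import nn

# Two-level layout: the outer key identifies an op/port, inner keys are hook ids.
storage = nn.ModuleDict()
storage["post_hook__conv1__0"] = nn.ModuleDict({"0": nn.Identity()})

# Mirrors RemovableHookHandle.remove(): delete the hook by id, then drop
# the outer key once its inner ModuleDict is empty.
hooks_dict = storage["post_hook__conv1__0"]
del hooks_dict["0"]
if not storage["post_hook__conv1__0"]:
    del storage["post_hook__conv1__0"]

assert "post_hook__conv1__0" not in storage
```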
tests/post_training/pipelines/base.py (1 addition, 1 deletion)

@@ -502,7 +502,7 @@ def save_compressed_model(self) -> None:
             )
             ov.serialize(ov_model, self.path_compressed_ir)
         elif self.backend in FX_BACKENDS:
-            exported_model = torch.export.export(self.compressed_model.cpu(), (self.dummy_tensor.cpu(),))
+            exported_model = torch.export.export(self.compressed_model.cpu(), (self.dummy_tensor.cpu(),), strict=True)
             # Torch export is used to save the model because ov.convert_model does not fully claim support for
             # Converting ExportedProgram
             torch.export.save(exported_model, self.output_model_dir / "model.pt2")
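The explicit strict=True here (and in the test changes below) preserves the previous tracing behavior: torch 2.8 is understood to flip torch.export.export's default to non-strict mode, so call sites that relied on strict TorchDynamo-based tracing now request it explicitly. A minimal sketch with an illustrative model:

```python
import torch

class TinyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x) + 1

# Passing strict=True keeps TorchDynamo-based strict tracing regardless of
# the default, which changed to non-strict in torch 2.8.
model = TinyModel()
exported = torch.export.export(model, (torch.randn(2, 3),), strict=True)
torch.export.save(exported, "model.pt2")
```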
tests/post_training/pipelines/fx_modelling.py (1 addition)

@@ -100,5 +100,6 @@ def convert_and_export_with_cache(model: PreTrainedModel):
             example_cache_position,
         ),
         dynamic_shapes=dynamic_shapes,
+        strict=True,
     ).run_decompositions(decomp_table={})
     return exported_program, model_config, gen_config
(test pipeline file, path not captured)

@@ -25,11 +25,11 @@


 def _torch_export_for_training(model: torch.nn.Module, args: tuple[Any, ...]) -> torch.fx.GraphModule:
-    return torch.export.export_for_training(model, args).module()
+    return torch.export.export_for_training(model, args, strict=True).module()


 def _torch_export(model: torch.nn.Module, args: tuple[Any, ...]) -> torch.fx.GraphModule:
-    return torch.export.export(model, args).module()
+    return torch.export.export(model, args, strict=True).module()


 @dataclass

@@ -103,7 +103,7 @@ def prepare_model(self) -> None:
         elif self.backend in [BackendType.OV, BackendType.FP32]:
             with torch.no_grad():
                 if self.model_params.export_torch_before_ov_convert:
-                    model = torch.export.export(model, (self.dummy_tensor,))
+                    model = torch.export.export(model, (self.dummy_tensor,), strict=True)
                 self.model = ov.convert_model(model, example_input=self.dummy_tensor, input=self.input_size)
             self.input_name = list(inp.get_any_name() for inp in self.model.inputs)[0]

@@ -125,7 +125,7 @@ def _dump_model_fp32(self) -> None:
             ov.serialize(ov_model, self.fp32_model_dir / "model_fp32.xml")

         if self.backend in FX_BACKENDS:
-            exported_model = torch.export.export(self.model.cpu(), (self.dummy_tensor.cpu(),))
+            exported_model = torch.export.export(self.model.cpu(), (self.dummy_tensor.cpu(),), strict=True)
             torch.export.save(exported_model, self.fp32_model_dir / "fx_model_fp32.pt2")

         if self.backend is BackendType.CUDA_FX_TORCH:
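Both helper functions above unwrap the ExportedProgram into a torch.fx.GraphModule via .module(), the representation the FX backends consume. A minimal sketch of that pattern, with an illustrative model:

```python
import torch

class TinyBlock(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(self.linear(x))

# export_for_training keeps ops in their pre-dispatch training form;
# .module() inlines parameters and returns a torch.fx.GraphModule.
ep = torch.export.export_for_training(TinyBlock(), (torch.randn(1, 4),), strict=True)
graph_module = ep.module()
print(graph_module.graph)  # FX graph of the exported block
```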