We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 2a3d5cc commit 81b993d
multimodal/vl2l/README.md
@@ -0,0 +1,4 @@
+# MLPerf Inference Vision-language-to-language (VL2L) Reference Implementation
+
+[Qwen3-VL-235B-A22B](https://github.com/QwenLM/Qwen3-VL/tree/4aae93c9fcca19d7cbca6b095d64f578d25ed75f)
+is currently selected as the model for the VL2L benchmark.
multimodal/vl2l/pyproject.toml
@@ -0,0 +1,13 @@
+[tool.ruff]
+lint.select = ["ALL"]
+lint.ignore = [
+    "ANN002", # Missing type annotation for `*args`
+    "ANN003", # Missing type annotation for `**kwargs`
+]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.mypy]
+check_untyped_defs = true
+plugins = ['pydantic.mypy']
0 commit comments