Skip to content

Commit a6b2415

Browse files
Fix default arguments in algorithms; improve user guide
1 parent a6bd23d commit a6b2415

File tree

6 files changed

+32
-24
lines changed

6 files changed

+32
-24
lines changed

dialogue2graph/datasets/complex_dialogues/generation.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -414,10 +414,10 @@ class LoopedGraphGenerator(TopicGraphGenerator):
414414
def __init__(
415415
self,
416416
model_storage: ModelStorage,
417-
generation_llm: str,
418-
validation_llm: str,
419-
cycle_ends_llm: str,
420-
theme_validation_llm: str,
417+
generation_llm: str = "looped_graph_generation_llm:v1",
418+
validation_llm: str = "looped_graph_validation_llm:v1",
419+
cycle_ends_llm: str = "looped_graph_cycle_ends_llm:v1",
420+
theme_validation_llm: str = "looped_graph_theme_validation_llm:v1",
421421
):
422422
# check if models are in model storage
423423
# if model is not in model storage put the default model there

dialogue2graph/pipelines/d2g_extender/three_stages_extender.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -104,15 +104,15 @@ class LLMGraphExtender(GraphExtender):
104104
def __init__(
105105
self,
106106
model_storage: ModelStorage,
107-
extending_llm: str,
108-
filling_llm: str,
109-
formatting_llm: str,
110-
dialog_llm: str,
111-
sim_model: str,
112-
step1_evals: list[Callable],
113-
extender_evals: list[Callable],
114-
step2_evals: list[Callable],
115-
end_evals: list[Callable],
107+
extending_llm: str = "extender_extending_llm:v1",
108+
filling_llm: str = "extender_filling_llm:v1",
109+
formatting_llm: str = "extender_formatting_llm:v1",
110+
dialog_llm: str = "extender_dialog_llm:v1",
111+
sim_model: str = "extender_sim_model:v1",
112+
step1_evals: list[Callable] | None = [],
113+
extender_evals: list[Callable] | None = [],
114+
step2_evals: list[Callable] | None = [],
115+
end_evals: list[Callable] | None = [],
116116
step: int = 2,
117117
):
118118
# check if models are in model storage

dialogue2graph/pipelines/d2g_light/three_stages_light.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -66,9 +66,9 @@ class LightGraphGenerator(GraphGenerator):
6666
def __init__(
6767
self,
6868
model_storage: ModelStorage,
69-
filling_llm: str,
70-
formatting_llm: str,
71-
sim_model: str,
69+
filling_llm: str = "three_stages_light_filling_llm:v1",
70+
formatting_llm: str = "three_stages_light_formatting_llm:v1",
71+
sim_model: str = "three_stages_light_sim_model:v1",
7272
step2_evals: list[Callable] | None = [],
7373
end_evals: list[Callable] | None = [],
7474
):

dialogue2graph/pipelines/d2g_llm/three_stages_llm.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -93,10 +93,10 @@ class LLMGraphGenerator(GraphGenerator):
9393
def __init__(
9494
self,
9595
model_storage: ModelStorage,
96-
grouping_llm: str,
97-
filling_llm: str,
98-
formatting_llm: str,
99-
sim_model: str,
96+
grouping_llm: str = "three_stages_grouping_llm:v1",
97+
filling_llm: str = "three_stages_filling_llm:v1",
98+
formatting_llm: str = "three_stages_formatting_llm:v1",
99+
sim_model: str = "three_stages_sim_model:v1",
100100
step2_evals: list[Callable] | None = None,
101101
end_evals: list[Callable] | None = None,
102102
):

docs/source/userguides/generate_graphs.rst

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,10 @@ First of all we need to import the :py:class:`~dialogue2graph.pipelines.model_st
77

88
.. code-block:: python
99
10+
from dialogue2graph import Dialogue
1011
from dialogue2graph.pipelines.model_storage import ModelStorage
1112
from dialogue2graph.pipelines.d2g_llm import LLMGraphGenerator
12-
from dialogue2graph import Dialogue
13+
from dialogue2graph.pipelines.helpers.parse_data import PipelineRawDataType
1314
1415
Now, we need to read the dialogues we want to generate a graph for. In this example we will read the dialogues from a JSON file. The dialogues should be in the following format:
1516

@@ -45,6 +46,11 @@ Let's read them:
4546
data = json.load(f)
4647
4748
dialogues = [Dialogue(**dialogue) for dialogue in data["dialogs"]]
49+
data = PipelineRawDataType(
50+
dialogs=dialogues,
51+
supported_graph=None,
52+
true_graph=None,
53+
)
4854
4955
Now we should create a :py:class:`~dialogue2graph.pipelines.model_storage.ModelStorage` object. This object will be used to store the models we will be using. In this example we will use the LLM model and the Embedding model. The LLM model will be used to generate the graph, and the Embedding model will be used to generate the embeddings for the nodes in the graph.
5056

@@ -81,7 +87,7 @@ Now we can generate the graph. We will pass the dialogues ``.invoke()`` method o
8187

8288
.. code-block:: python
8389
84-
graph, report = graph_generator.invoke(dialogues, enable_evals=True)
85-
graph.visualize()
90+
graph, report = graph_generator.invoke(data, enable_evals=True)
91+
graph.visualise()
8692
8793
print(report)

pyproject.toml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "dialogue2graph"
3-
version = "0.1.0"
3+
version = "0.1.1"
44
description = "Dialogue2Graph is a project for creating dialogue graphs based on input dialogues."
55
authors = ["Denis Kuznetsov <[email protected]>, Chirkin Andrey <[email protected]>, Anastasia Voznyuk <>, Anna Mikhailova <>, Maria Molchanova <>, Yuri Peshkichev <>"]
66
readme = "README.md"
@@ -57,6 +57,7 @@ build-backend = "poetry.core.masonry.api"
5757

5858

5959

60+
6061
[tool.poetry.group.lint]
6162
optional = true
6263

@@ -67,6 +68,7 @@ ruff = "*"
6768

6869

6970

71+
7072
[tool.poetry.group.tests]
7173
optional = true
7274

0 commit comments

Comments (0)