Skip to content

Commit 055d95e

Browse files
0.1.1 patch (#53)
* Fix default arguments in algorithms; improve user guide
1 parent a79339f commit 055d95e

File tree

6 files changed

+34
-31
lines changed

6 files changed

+34
-31
lines changed

dialogue2graph/datasets/complex_dialogues/generation.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -414,10 +414,10 @@ class LoopedGraphGenerator(TopicGraphGenerator):
414414
def __init__(
415415
self,
416416
model_storage: ModelStorage,
417-
generation_llm: str,
418-
validation_llm: str,
419-
cycle_ends_llm: str,
420-
theme_validation_llm: str,
417+
generation_llm: str = "looped_graph_generation_llm:v1",
418+
validation_llm: str = "looped_graph_validation_llm:v1",
419+
cycle_ends_llm: str = "looped_graph_cycle_ends_llm:v1",
420+
theme_validation_llm: str = "looped_graph_theme_validation_llm:v1",
421421
):
422422
# check if models are in model storage
423423
# if model is not in model storage put the default model there

dialogue2graph/pipelines/d2g_extender/three_stages_extender.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -104,15 +104,15 @@ class LLMGraphExtender(GraphExtender):
104104
def __init__(
105105
self,
106106
model_storage: ModelStorage,
107-
extending_llm: str,
108-
filling_llm: str,
109-
formatting_llm: str,
110-
dialog_llm: str,
111-
sim_model: str,
112-
step1_evals: list[Callable],
113-
extender_evals: list[Callable],
114-
step2_evals: list[Callable],
115-
end_evals: list[Callable],
107+
extending_llm: str = "extender_extending_llm:v1",
108+
filling_llm: str = "extender_filling_llm:v1",
109+
formatting_llm: str = "extender_formatting_llm:v1",
110+
dialog_llm: str = "extender_dialog_llm:v1",
111+
sim_model: str = "extender_sim_model:v1",
112+
step1_evals: list[Callable] | None = [],
113+
extender_evals: list[Callable] | None = [],
114+
step2_evals: list[Callable] | None = [],
115+
end_evals: list[Callable] | None = [],
116116
step: int = 2,
117117
):
118118
# check if models are in model storage

dialogue2graph/pipelines/d2g_light/three_stages_light.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -66,9 +66,10 @@ class LightGraphGenerator(GraphGenerator):
6666
def __init__(
6767
self,
6868
model_storage: ModelStorage,
69-
filling_llm: str,
70-
formatting_llm: str,
71-
sim_model: str,
69+
filling_llm: str = "three_stages_light_filling_llm:v1",
70+
formatting_llm: str = "three_stages_light_formatting_llm:v1",
71+
sim_model: str = "three_stages_light_sim_model:v1",
72+
7273
step2_evals: list[Callable] | None = [],
7374
end_evals: list[Callable] | None = [],
7475
):

dialogue2graph/pipelines/d2g_llm/three_stages_llm.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -93,10 +93,11 @@ class LLMGraphGenerator(GraphGenerator):
9393
def __init__(
9494
self,
9595
model_storage: ModelStorage,
96-
grouping_llm: str,
97-
filling_llm: str,
98-
formatting_llm: str,
99-
sim_model: str,
96+
grouping_llm: str = "three_stages_grouping_llm:v1",
97+
filling_llm: str = "three_stages_filling_llm:v1",
98+
formatting_llm: str = "three_stages_formatting_llm:v1",
99+
sim_model: str = "three_stages_sim_model:v1",
100+
100101
step2_evals: list[Callable] | None = None,
101102
end_evals: list[Callable] | None = None,
102103
):

docs/source/userguides/generate_graphs.rst

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,11 @@ First of all we need to import the :py:class:`~dialogue2graph.pipelines.model_st
77

88
.. code-block:: python
99
10+
from dialogue2graph import Dialogue
1011
from dialogue2graph.pipelines.model_storage import ModelStorage
1112
from dialogue2graph.pipelines.d2g_llm import LLMGraphGenerator
12-
from dialogue2graph import Dialogue
13+
from dialogue2graph.pipelines.helpers.parse_data import PipelineRawDataType
14+
1315
1416
Now, we need to read the dialogues we want to generate a graph for. In this example we will read the dialogues from a JSON file. The dialogues should be in the following format:
1517

@@ -45,6 +47,11 @@ Let's read them:
4547
data = json.load(f)
4648
4749
dialogues = [Dialogue(**dialogue) for dialogue in data["dialogs"]]
50+
data = PipelineRawDataType(
51+
dialogs=dialogues,
52+
supported_graph=None,
53+
true_graph=None,
54+
)
4855
4956
Now we should create a :py:class:`~dialogue2graph.pipelines.model_storage.ModelStorage` object. This object will be used to store the models we will be using. In this example we will use the LLM model and the Embedding model. The LLM model will be used to generate the graph, and the Embedding model will be used to generate the embeddings for the nodes in the graph.
5057

@@ -81,7 +88,8 @@ Now we can generate the graph. We will pass the dialogues ``.invoke()`` method o
8188

8289
.. code-block:: python
8390
84-
graph, report = graph_generator.invoke(dialogues, enable_evals=True)
85-
graph.visualize()
91+
graph, report = graph_generator.invoke(data, enable_evals=True)
92+
graph.visualise()
93+
8694
8795
print(report)

pyproject.toml

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "dialogue2graph"
3-
version = "0.1.0"
3+
version = "0.1.1"
44
description = "Dialogue2Graph is a project for creating dialogue graphs based on input dialogues."
55
authors = ["Denis Kuznetsov <[email protected]>, Chirkin Andrey <[email protected]>, Anastasia Voznyuk <>, Anna Mikhailova <>, Maria Molchanova <>, Yuri Peshkichev <>"]
66
readme = "README.md"
@@ -53,20 +53,13 @@ requires = ["poetry-core"]
5353
build-backend = "poetry.core.masonry.api"
5454

5555

56-
57-
58-
59-
6056
[tool.poetry.group.lint]
6157
optional = true
6258

6359
[tool.poetry.group.lint.dependencies]
6460
ruff = "*"
6561

6662

67-
68-
69-
7063
[tool.poetry.group.tests]
7164
optional = true
7265

0 commit comments

Comments
 (0)