Skip to content

Commit 60e2965

Browse files
authored
Merge branch 'dev' into feat/async-augmentations
2 parents 98f1658 + 465e325 commit 60e2965

File tree

4 files changed

+6
-4
lines changed

4 files changed

+6
-4
lines changed

.github/workflows/generate-schema.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ name: Generate JSON Schema
 on:
   push:
     branches:
-      - main
+      - dev

 permissions:
   contents: write

autointent/modules/embedding/_logreg.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,8 +77,8 @@ def __init__(
     def from_context(
         cls,
         context: Context,
-        cv: int,
         embedder_name: str,
+        cv: int = 3,
     ) -> "LogregAimedEmbedding":
         """
         Create a LogregAimedEmbedding instance using a Context object.
@@ -89,8 +89,8 @@ def from_context(
         :return: Initialized LogregAimedEmbedding instance.
         """
         return cls(
-            cv=cv,
             embedder_name=embedder_name,
+            cv=cv,
             embedder_device=context.get_device(),
             embedder_batch_size=context.get_batch_size(),
             embedder_max_length=context.get_max_length(),

tests/generation/utterances/test_basic_synthesizer.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ def test_on_dataset(dataset):
     assert n_before + len(new_samples) == n_after
     assert len(new_samples) == len(dataset.intents)

+
 def test_on_dataset_async(dataset):
     mock_llm = AsyncMock()
     mock_llm.get_chat_completion_async.return_value = "1. LLM answer"

tests/generation/utterances/test_evolver.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ def test_on_dataset(dataset):
     assert n_before + len(new_samples) == n_after
     assert len(new_samples) == n_before

+
 def test_on_dataset_evolver_async(dataset):
     mock_llm = AsyncMock()
     mock_llm.get_chat_completion_async.return_value = "LLM answer"
@@ -88,4 +89,4 @@ def test_on_dataset_evolver_async_with_batch_size(dataset):
     )

     assert len(new_samples) == len(dataset[split_name])
-    assert all(sample.utterance == "LLM answer" for sample in new_samples)
+    assert all(sample.utterance == "LLM answer" for sample in new_samples)

0 commit comments

Comments (0)