forked from chaosen315/AIwork4translator
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_main_prefs_integration.py
More file actions
74 lines (63 loc) · 2.67 KB
/
test_main_prefs_integration.py
File metadata and controls
74 lines (63 loc) · 2.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import os
import json
import csv
import importlib
import pytest
class FakeLLMService:
    """Test double standing in for modules.api_tool.LLMService.

    Records every provider name it is constructed with in the shared
    class-level ``calls`` list so tests can assert which provider
    main() actually instantiated.
    """

    # Shared across all instances: one entry per construction, in order.
    calls = []

    def __init__(self, provider):
        FakeLLMService.calls.append(provider)
        self.provider = provider

    def create_prompt(self, paragraph, terms):
        """Ignore the inputs and hand back a canned prompt."""
        return "prompt"

    def call_ai_model_api(self, prompt):
        """Pretend the remote call succeeded: canned text plus a token count."""
        return "ok", 1

    def test_api(self):
        """Always report the connectivity check as passing."""
        return "ok"
def write_csv(path, rows):
    """Write *rows* (an iterable of row sequences) to *path* as CSV.

    Uses utf-8-sig so the file carries a BOM, matching what the
    application under test expects to read back.
    """
    with open(path, "w", encoding="utf-8-sig", newline="") as handle:
        csv.writer(handle).writerows(rows)
def test_main_prefs_roundtrip(monkeypatch, tmp_path):
    """End-to-end check that main() persists user choices to data/.prefs.json
    and reuses them on a second run when the user answers each prompt with
    an empty string (i.e. accepts the stored default).

    All heavy collaborators (LLM service, config, readers, counters, writers,
    term matching) are monkeypatched so only the preference round-trip is
    exercised.
    """
    monkeypatch.chdir(tmp_path)
    data_dir = tmp_path / "data"
    data_dir.mkdir()
    md = tmp_path / "doc.md"
    md.write_text("hello", encoding="utf-8")
    csvp = tmp_path / "terms.csv"
    write_csv(csvp, [["term","definition"],["outlaw","法外之徒"]])
    # Stub out everything main() touches besides preference handling.
    monkeypatch.setattr("modules.api_tool.LLMService", FakeLLMService)
    monkeypatch.setattr("modules.config.global_config", type("CFG", (), {"preserve_structure": False, "max_chunk_size": 1024})())
    monkeypatch.setattr("modules.read_tool.read_structured_paragraphs", lambda *a, **k: ["p"])
    monkeypatch.setattr("modules.count_tool.count_structured_paragraphs", lambda *a, **k: 1)
    monkeypatch.setattr("modules.count_tool.count_md_words", lambda *a, **k: 1)
    def _write_to_markdown(path, payload, mode='flat'):
        # The original stub had identical bodies for the 'structured' and
        # default branches, so the dead branching is collapsed here: every
        # mode just writes the first payload element.
        with open(path, "w", encoding="utf-8") as f:
            f.write(str(payload[0]))
    monkeypatch.setattr("modules.write_out_tool.write_to_markdown", _write_to_markdown)
    monkeypatch.setattr("modules.csv_process_tool.find_matching_terms", lambda p, t: {})
    # First run: explicit answers at each prompt; these should be persisted.
    inputs = iter(["kimi", str(md), "y", str(csvp)])
    monkeypatch.setattr("builtins.input", lambda prompt: next(inputs))
    import main as mainmod
    importlib.reload(mainmod)
    mainmod.main()
    prefs_path = tmp_path / "data" / ".prefs.json"
    assert prefs_path.exists()
    prefs = json.loads(prefs_path.read_text(encoding="utf-8"))
    assert prefs["last_provider"] == "kimi"
    assert prefs["last_input_md_file"] == str(md)
    assert prefs["last_csv_path"] == str(csvp)
    # Second run: empty answers mean "accept the stored default" — the
    # persisted preferences must survive unchanged.
    inputs2 = iter(["", "", "y", ""])
    monkeypatch.setattr("builtins.input", lambda prompt: next(inputs2))
    importlib.reload(mainmod)
    mainmod.main()
    prefs2 = json.loads(prefs_path.read_text(encoding="utf-8"))
    assert prefs2["last_provider"] == "kimi"
    assert prefs2["last_input_md_file"] == str(md)
    assert prefs2["last_csv_path"] == str(csvp)
    # The reused default must have produced a service for the same provider.
    assert FakeLLMService.calls[-1] == "kimi"